genfs_vnops.c revision 1.11.4.7

1 1.11.4.7 perseant /* $NetBSD: genfs_vnops.c,v 1.11.4.7 1999/08/31 21:03:44 perseant Exp $ */
2 1.6 fvdl
3 1.6 fvdl /*
4 1.6 fvdl * Copyright (c) 1982, 1986, 1989, 1993
5 1.6 fvdl * The Regents of the University of California. All rights reserved.
6 1.6 fvdl *
7 1.6 fvdl * Redistribution and use in source and binary forms, with or without
8 1.6 fvdl * modification, are permitted provided that the following conditions
9 1.6 fvdl * are met:
10 1.6 fvdl * 1. Redistributions of source code must retain the above copyright
11 1.6 fvdl * notice, this list of conditions and the following disclaimer.
12 1.6 fvdl * 2. Redistributions in binary form must reproduce the above copyright
13 1.6 fvdl * notice, this list of conditions and the following disclaimer in the
14 1.6 fvdl * documentation and/or other materials provided with the distribution.
15 1.6 fvdl * 3. All advertising materials mentioning features or use of this software
16 1.6 fvdl * must display the following acknowledgement:
17 1.6 fvdl * This product includes software developed by the University of
18 1.6 fvdl * California, Berkeley and its contributors.
19 1.6 fvdl * 4. Neither the name of the University nor the names of its contributors
20 1.6 fvdl * may be used to endorse or promote products derived from this software
21 1.6 fvdl * without specific prior written permission.
22 1.6 fvdl *
23 1.6 fvdl * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 1.6 fvdl * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 1.6 fvdl * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 1.6 fvdl * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 1.6 fvdl * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 1.6 fvdl * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 1.6 fvdl * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 1.6 fvdl * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 1.6 fvdl * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 1.6 fvdl * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 1.6 fvdl * SUCH DAMAGE.
34 1.6 fvdl *
35 1.6 fvdl */
36 1.5 perry
37 1.8 thorpej #include "opt_nfsserver.h"
38 1.8 thorpej
39 1.1 mycroft #include <sys/param.h>
40 1.1 mycroft #include <sys/systm.h>
41 1.6 fvdl #include <sys/proc.h>
42 1.1 mycroft #include <sys/kernel.h>
43 1.1 mycroft #include <sys/mount.h>
44 1.1 mycroft #include <sys/namei.h>
45 1.1 mycroft #include <sys/vnode.h>
46 1.1 mycroft #include <sys/malloc.h>
47 1.3 mycroft #include <sys/poll.h>
48 1.1 mycroft
49 1.1 mycroft #include <miscfs/genfs/genfs.h>
50 1.6 fvdl #include <miscfs/specfs/specdev.h>
51 1.1 mycroft
52 1.11.4.1 chs #include <vm/vm.h>
53 1.11.4.1 chs #include <uvm/uvm.h>
54 1.11.4.7 perseant #include <uvm/uvm_pager.h>
55 1.11.4.1 chs
56 1.8 thorpej #ifdef NFSSERVER
57 1.8 thorpej #include <nfs/rpcv2.h>
58 1.8 thorpej #include <nfs/nfsproto.h>
59 1.8 thorpej #include <nfs/nfs.h>
60 1.8 thorpej #include <nfs/nqnfs.h>
61 1.8 thorpej #include <nfs/nfs_var.h>
62 1.8 thorpej #endif
63 1.8 thorpej
64 1.1 mycroft int
65 1.3 mycroft genfs_poll(v)
66 1.1 mycroft void *v;
67 1.1 mycroft {
68 1.3 mycroft struct vop_poll_args /* {
69 1.1 mycroft struct vnode *a_vp;
70 1.3 mycroft int a_events;
71 1.1 mycroft struct proc *a_p;
72 1.1 mycroft } */ *ap = v;
73 1.1 mycroft
74 1.3 mycroft return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
75 1.1 mycroft }
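
/*
 * Example usage (sketch): filesystems pick up these defaults by pointing
 * slots of their vnodeop tables at the genfs_* routines; the "myfs" names
 * below are hypothetical.
 *
 *	struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc, vn_default_error },
 *		{ &vop_poll_desc, genfs_poll },
 *		{ &vop_fsync_desc, genfs_fsync },
 *		{ &vop_seek_desc, genfs_seek },
 *		...
 *		{ NULL, NULL }
 *	};
 */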
76 1.1 mycroft
77 1.1 mycroft int
78 1.1 mycroft genfs_fsync(v)
79 1.1 mycroft void *v;
80 1.1 mycroft {
81 1.1 mycroft struct vop_fsync_args /* {
82 1.1 mycroft struct vnode *a_vp;
83 1.1 mycroft struct ucred *a_cred;
84 1.7 kleink int a_flags;
85 1.1 mycroft struct proc *a_p;
86 1.1 mycroft } */ *ap = v;
87 1.1 mycroft register struct vnode *vp = ap->a_vp;
88 1.11 mycroft int wait;
89 1.1 mycroft
90 1.11 mycroft wait = (ap->a_flags & FSYNC_WAIT) != 0;
91 1.11 mycroft vflushbuf(vp, wait);
92 1.11 mycroft if ((ap->a_flags & FSYNC_DATAONLY) != 0)
93 1.7 kleink return (0);
94 1.11 mycroft else
95 1.11 mycroft return (VOP_UPDATE(ap->a_vp, NULL, NULL, wait));
96 1.1 mycroft }
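
/*
 * Example (sketch): a caller that wants the vnode's dirty buffers flushed
 * synchronously would pass FSYNC_WAIT, e.g.
 *
 *	error = VOP_FSYNC(vp, cred, FSYNC_WAIT, p);
 *
 * passing 0 instead starts the writes without waiting for them to finish.
 */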
97 1.1 mycroft
98 1.1 mycroft int
99 1.4 kleink genfs_seek(v)
100 1.4 kleink void *v;
101 1.4 kleink {
102 1.4 kleink struct vop_seek_args /* {
103 1.4 kleink struct vnode *a_vp;
104 1.4 kleink off_t a_oldoff;
105 1.4 kleink off_t a_newoff;
106 1.4 kleink struct ucred *a_ucred;
107 1.4 kleink } */ *ap = v;
108 1.4 kleink
109 1.4 kleink if (ap->a_newoff < 0)
110 1.4 kleink return (EINVAL);
111 1.4 kleink
112 1.4 kleink return (0);
113 1.4 kleink }
114 1.4 kleink
115 1.4 kleink int
116 1.1 mycroft genfs_abortop(v)
117 1.1 mycroft void *v;
118 1.1 mycroft {
119 1.1 mycroft struct vop_abortop_args /* {
120 1.1 mycroft struct vnode *a_dvp;
121 1.1 mycroft struct componentname *a_cnp;
122 1.1 mycroft } */ *ap = v;
123 1.1 mycroft
124 1.1 mycroft if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
125 1.1 mycroft FREE(ap->a_cnp->cn_pnbuf, M_NAMEI);
126 1.1 mycroft return (0);
127 1.1 mycroft }
128 1.1 mycroft
129 1.1 mycroft /*ARGSUSED*/
130 1.1 mycroft int
131 1.1 mycroft genfs_badop(v)
132 1.1 mycroft void *v;
133 1.1 mycroft {
134 1.1 mycroft
135 1.1 mycroft panic("genfs: bad op");
136 1.1 mycroft }
137 1.1 mycroft
138 1.1 mycroft /*ARGSUSED*/
139 1.1 mycroft int
140 1.1 mycroft genfs_nullop(v)
141 1.1 mycroft void *v;
142 1.1 mycroft {
143 1.1 mycroft
144 1.1 mycroft return (0);
145 1.10 kleink }
146 1.10 kleink
147 1.10 kleink /*ARGSUSED*/
148 1.10 kleink int
149 1.10 kleink genfs_einval(v)
150 1.10 kleink void *v;
151 1.10 kleink {
152 1.10 kleink
153 1.10 kleink return (EINVAL);
154 1.1 mycroft }
155 1.1 mycroft
156 1.1 mycroft /*ARGSUSED*/
157 1.1 mycroft int
158 1.1 mycroft genfs_eopnotsupp(v)
159 1.1 mycroft void *v;
160 1.1 mycroft {
161 1.1 mycroft
162 1.1 mycroft return (EOPNOTSUPP);
163 1.1 mycroft }
164 1.1 mycroft
165 1.11.4.5 thorpej /*
166 1.11.4.5 thorpej * Called when an fs doesn't support a particular vop, but the vop still
167 1.11.4.5 thorpej * needs to vrele, vput, or unlock the vnodes that were passed in.
168 1.11.4.5 thorpej */
169 1.11.4.5 thorpej int
170 1.11.4.5 thorpej genfs_eopnotsupp_rele(v)
171 1.11.4.5 thorpej void *v;
172 1.11.4.5 thorpej {
173 1.11.4.5 thorpej struct vop_generic_args /*
174 1.11.4.5 thorpej struct vnodeop_desc *a_desc;
175 1.11.4.5 thorpej / * other random data follows, presumably * /
176 1.11.4.5 thorpej } */ *ap = v;
177 1.11.4.5 thorpej struct vnodeop_desc *desc = ap->a_desc;
178 1.11.4.5 thorpej struct vnode *vp;
179 1.11.4.5 thorpej int flags, i, j, offset;
180 1.11.4.5 thorpej
181 1.11.4.5 thorpej flags = desc->vdesc_flags;
182 1.11.4.5 thorpej for (i = 0; i < VDESC_MAX_VPS; flags >>=1, i++) {
183 1.11.4.5 thorpej if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
184 1.11.4.5 thorpej break; /* stop at end of list */
185 1.11.4.5 thorpej if ((j = flags & VDESC_VP0_WILLPUT)) {
186 1.11.4.5 thorpej vp = *VOPARG_OFFSETTO(struct vnode**,offset,ap);
187 1.11.4.5 thorpej switch (j) {
188 1.11.4.5 thorpej case VDESC_VP0_WILLPUT:
189 1.11.4.5 thorpej vput(vp);
190 1.11.4.5 thorpej break;
191 1.11.4.5 thorpej case VDESC_VP0_WILLUNLOCK:
192 1.11.4.5 thorpej VOP_UNLOCK(vp, 0);
193 1.11.4.5 thorpej break;
194 1.11.4.5 thorpej case VDESC_VP0_WILLRELE:
195 1.11.4.5 thorpej vrele(vp);
196 1.11.4.5 thorpej break;
197 1.11.4.5 thorpej }
198 1.11.4.5 thorpej }
199 1.11.4.5 thorpej }
200 1.11.4.5 thorpej
201 1.11.4.5 thorpej return (EOPNOTSUPP);
202 1.11.4.5 thorpej }
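
/*
 * Example (sketch): a filesystem that leaves an operation unimplemented but
 * whose callers still expect the passed-in vnodes to be released or unlocked
 * can point that slot here instead of at genfs_eopnotsupp, e.g.
 *
 *	{ &vop_link_desc, genfs_eopnotsupp_rele },
 *
 * so the EOPNOTSUPP return still honours the operation's vput/vrele contract.
 */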
203 1.11.4.5 thorpej
204 1.1 mycroft /*ARGSUSED*/
205 1.1 mycroft int
206 1.1 mycroft genfs_ebadf(v)
207 1.1 mycroft void *v;
208 1.1 mycroft {
209 1.1 mycroft
210 1.1 mycroft return (EBADF);
211 1.9 matthias }
212 1.9 matthias
213 1.9 matthias /* ARGSUSED */
214 1.9 matthias int
215 1.9 matthias genfs_enoioctl(v)
216 1.9 matthias void *v;
217 1.9 matthias {
218 1.9 matthias
219 1.9 matthias return (ENOTTY);
220 1.6 fvdl }
221 1.6 fvdl
222 1.6 fvdl
223 1.6 fvdl /*
224 1.6 fvdl * Eliminate all activity associated with the requested vnode
225 1.6 fvdl * and with all vnodes aliased to the requested vnode.
226 1.6 fvdl */
227 1.6 fvdl int
228 1.6 fvdl genfs_revoke(v)
229 1.6 fvdl void *v;
230 1.6 fvdl {
231 1.6 fvdl struct vop_revoke_args /* {
232 1.6 fvdl struct vnode *a_vp;
233 1.6 fvdl int a_flags;
234 1.6 fvdl } */ *ap = v;
235 1.6 fvdl struct vnode *vp, *vq;
236 1.6 fvdl struct proc *p = curproc; /* XXX */
237 1.6 fvdl
238 1.6 fvdl #ifdef DIAGNOSTIC
239 1.6 fvdl if ((ap->a_flags & REVOKEALL) == 0)
240 1.6 fvdl panic("genfs_revoke: not revokeall");
241 1.6 fvdl #endif
242 1.6 fvdl
243 1.6 fvdl vp = ap->a_vp;
244 1.6 fvdl simple_lock(&vp->v_interlock);
245 1.6 fvdl
246 1.6 fvdl if (vp->v_flag & VALIASED) {
247 1.6 fvdl /*
248 1.6 fvdl * If a vgone (or vclean) is already in progress,
249 1.6 fvdl * wait until it is done and return.
250 1.6 fvdl */
251 1.6 fvdl if (vp->v_flag & VXLOCK) {
252 1.6 fvdl vp->v_flag |= VXWANT;
253 1.6 fvdl simple_unlock(&vp->v_interlock);
254 1.6 fvdl tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
255 1.6 fvdl return (0);
256 1.6 fvdl }
257 1.6 fvdl /*
258 1.6 fvdl * Ensure that vp will not be vgone'd while we
259 1.6 fvdl * are eliminating its aliases.
260 1.6 fvdl */
261 1.6 fvdl vp->v_flag |= VXLOCK;
262 1.6 fvdl simple_unlock(&vp->v_interlock);
263 1.6 fvdl while (vp->v_flag & VALIASED) {
264 1.6 fvdl simple_lock(&spechash_slock);
265 1.6 fvdl for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
266 1.6 fvdl if (vq->v_rdev != vp->v_rdev ||
267 1.6 fvdl vq->v_type != vp->v_type || vp == vq)
268 1.6 fvdl continue;
269 1.6 fvdl simple_unlock(&spechash_slock);
270 1.6 fvdl vgone(vq);
271 1.6 fvdl break;
272 1.6 fvdl }
273 1.6 fvdl if (vq == NULLVP)
274 1.6 fvdl simple_unlock(&spechash_slock);
275 1.6 fvdl }
276 1.6 fvdl /*
277 1.6 fvdl * Remove the lock so that vgone below will
278 1.6 fvdl * really eliminate the vnode after which time
279 1.6 fvdl * vgone will awaken any sleepers.
280 1.6 fvdl */
281 1.6 fvdl simple_lock(&vp->v_interlock);
282 1.6 fvdl vp->v_flag &= ~VXLOCK;
283 1.6 fvdl }
284 1.6 fvdl vgonel(vp, p);
285 1.6 fvdl return (0);
286 1.6 fvdl }
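
/*
 * Example (sketch): the revoke(2) path reaches this routine via
 *
 *	VOP_REVOKE(vp, REVOKEALL);
 *
 * which invalidates the vnode (and, for devices, all of its aliases) out
 * from under any existing opens.
 */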
287 1.6 fvdl
288 1.6 fvdl /*
289 1.11.4.5 thorpej * Lock the node.
290 1.6 fvdl */
291 1.6 fvdl int
292 1.11.4.5 thorpej genfs_lock(v)
293 1.6 fvdl void *v;
294 1.6 fvdl {
295 1.6 fvdl struct vop_lock_args /* {
296 1.6 fvdl struct vnode *a_vp;
297 1.6 fvdl int a_flags;
298 1.6 fvdl struct proc *a_p;
299 1.6 fvdl } */ *ap = v;
300 1.6 fvdl struct vnode *vp = ap->a_vp;
301 1.6 fvdl
302 1.11.4.5 thorpej return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock));
303 1.6 fvdl }
304 1.6 fvdl
305 1.6 fvdl /*
306 1.11.4.5 thorpej * Unlock the node.
307 1.6 fvdl */
308 1.6 fvdl int
309 1.11.4.5 thorpej genfs_unlock(v)
310 1.6 fvdl void *v;
311 1.6 fvdl {
312 1.6 fvdl struct vop_unlock_args /* {
313 1.6 fvdl struct vnode *a_vp;
314 1.6 fvdl int a_flags;
315 1.6 fvdl struct proc *a_p;
316 1.6 fvdl } */ *ap = v;
317 1.6 fvdl struct vnode *vp = ap->a_vp;
318 1.6 fvdl
319 1.11.4.5 thorpej return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
320 1.11.4.5 thorpej &vp->v_interlock));
321 1.6 fvdl }
322 1.6 fvdl
323 1.6 fvdl /*
324 1.11.4.5 thorpej * Return whether or not the node is locked.
325 1.6 fvdl */
326 1.6 fvdl int
327 1.11.4.5 thorpej genfs_islocked(v)
328 1.6 fvdl void *v;
329 1.6 fvdl {
330 1.6 fvdl struct vop_islocked_args /* {
331 1.6 fvdl struct vnode *a_vp;
332 1.6 fvdl } */ *ap = v;
333 1.6 fvdl struct vnode *vp = ap->a_vp;
334 1.6 fvdl
335 1.11.4.5 thorpej return (lockstatus(&vp->v_lock));
336 1.11.4.5 thorpej }
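
/*
 * Example (sketch): a filesystem that keeps its vnode lock in v_lock can use
 * the three routines above for VOP_LOCK/VOP_UNLOCK/VOP_ISLOCKED; a typical
 * caller then looks like
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	...
 *	VOP_UNLOCK(vp, 0);
 *
 * Filesystems that do no per-node locking use the genfs_no* stubs below.
 */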
337 1.11.4.5 thorpej
338 1.11.4.5 thorpej /*
339 1.11.4.5 thorpej * Stubs to use when there is no locking to be done on the underlying object.
340 1.11.4.5 thorpej */
341 1.11.4.5 thorpej int
342 1.11.4.5 thorpej genfs_nolock(v)
343 1.11.4.5 thorpej void *v;
344 1.11.4.5 thorpej {
345 1.11.4.5 thorpej struct vop_lock_args /* {
346 1.11.4.5 thorpej struct vnode *a_vp;
347 1.11.4.5 thorpej int a_flags;
348 1.11.4.5 thorpej struct proc *a_p;
349 1.11.4.5 thorpej } */ *ap = v;
350 1.11.4.5 thorpej
351 1.11.4.5 thorpej /*
352 1.11.4.5 thorpej * Since we are not using the lock manager, we must clear
353 1.11.4.5 thorpej * the interlock here.
354 1.11.4.5 thorpej */
355 1.11.4.5 thorpej if (ap->a_flags & LK_INTERLOCK)
356 1.11.4.5 thorpej simple_unlock(&ap->a_vp->v_interlock);
357 1.11.4.5 thorpej return (0);
358 1.11.4.5 thorpej }
359 1.11.4.5 thorpej
360 1.11.4.5 thorpej int
361 1.11.4.5 thorpej genfs_nounlock(v)
362 1.11.4.5 thorpej void *v;
363 1.11.4.5 thorpej {
364 1.11.4.5 thorpej return (0);
365 1.11.4.5 thorpej }
366 1.11.4.5 thorpej
367 1.11.4.5 thorpej int
368 1.11.4.5 thorpej genfs_noislocked(v)
369 1.11.4.5 thorpej void *v;
370 1.11.4.5 thorpej {
371 1.11.4.5 thorpej return (0);
372 1.8 thorpej }
373 1.8 thorpej
374 1.8 thorpej /*
375 1.8 thorpej * Local lease check for NFS servers. Just set up args and let
376 1.8 thorpej * nqsrv_getlease() do the rest. If NFSSERVER is not in the kernel,
377 1.8 thorpej * this is a null operation.
378 1.8 thorpej */
379 1.8 thorpej int
380 1.8 thorpej genfs_lease_check(v)
381 1.8 thorpej void *v;
382 1.8 thorpej {
383 1.8 thorpej #ifdef NFSSERVER
384 1.8 thorpej struct vop_lease_args /* {
385 1.8 thorpej struct vnode *a_vp;
386 1.8 thorpej struct proc *a_p;
387 1.8 thorpej struct ucred *a_cred;
388 1.8 thorpej int a_flag;
389 1.8 thorpej } */ *ap = v;
390 1.8 thorpej u_int32_t duration = 0;
391 1.8 thorpej int cache;
392 1.8 thorpej u_quad_t frev;
393 1.8 thorpej
394 1.8 thorpej (void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
395 1.8 thorpej NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred);
396 1.8 thorpej return (0);
397 1.8 thorpej #else
398 1.8 thorpej return (0);
399 1.8 thorpej #endif /* NFSSERVER */
400 1.11.4.1 chs }
401 1.11.4.1 chs
402 1.11.4.1 chs /*
403 1.11.4.1 chs * generic VM getpages routine.
404 1.11.4.1 chs * Return PG_BUSY pages for the given range,
405 1.11.4.1 chs * reading from backing store if necessary.
406 1.11.4.1 chs */
407 1.11.4.4 chs
408 1.11.4.1 chs int
409 1.11.4.1 chs genfs_getpages(v)
410 1.11.4.1 chs void *v;
411 1.11.4.1 chs {
412 1.11.4.1 chs struct vop_getpages_args /* {
413 1.11.4.1 chs struct vnode *a_vp;
414 1.11.4.6 chs voff_t a_offset;
415 1.11.4.1 chs vm_page_t *a_m;
416 1.11.4.1 chs int *a_count;
417 1.11.4.1 chs int a_centeridx;
418 1.11.4.1 chs vm_prot_t a_access_type;
419 1.11.4.1 chs int a_advice;
420 1.11.4.1 chs int a_flags;
421 1.11.4.1 chs } */ *ap = v;
422 1.11.4.1 chs
423 1.11.4.4 chs off_t eof, offset, origoffset, startoffset, endoffset;
424 1.11.4.1 chs daddr_t lbn, blkno;
425 1.11.4.4 chs int s, i, error, npages, npgs, run, ridx, pidx, pcount;
426 1.11.4.2 chs int bsize, bshift, dev_bshift, dev_bsize;
427 1.11.4.1 chs int flags = ap->a_flags;
428 1.11.4.2 chs size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
429 1.11.4.2 chs boolean_t sawhole = FALSE;
430 1.11.4.1 chs char *kva;
431 1.11.4.1 chs struct buf *bp, *mbp;
432 1.11.4.1 chs struct vnode *vp = ap->a_vp;
433 1.11.4.1 chs struct uvm_object *uobj = &vp->v_uvm.u_obj;
434 1.11.4.4 chs struct vm_page *pgs[16]; /* XXX 16 */
435 1.11.4.2 chs struct ucred *cred = curproc->p_ucred; /* XXX curproc */
436 1.11.4.1 chs UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
437 1.11.4.1 chs
438 1.11.4.1 chs #ifdef DIAGNOSTIC
439 1.11.4.1 chs if (ap->a_centeridx < 0 || ap->a_centeridx > *ap->a_count) {
440 1.11.4.1 chs panic("genfs_getpages: centeridx %d out of range",
441 1.11.4.1 chs ap->a_centeridx);
442 1.11.4.1 chs }
443 1.11.4.1 chs if (ap->a_offset & (PAGE_SIZE - 1)) {
444 1.11.4.1 chs panic("genfs_getpages: offset 0x%x", (int)ap->a_offset);
445 1.11.4.1 chs }
446 1.11.4.4 chs if (*ap->a_count < 0) {
447 1.11.4.4 chs panic("genfs_getpages: count %d < 0", *ap->a_count);
448 1.11.4.4 chs }
449 1.11.4.1 chs #endif
450 1.11.4.1 chs
451 1.11.4.1 chs /*
452 1.11.4.1 chs * Bounds-check the request.
453 1.11.4.1 chs */
454 1.11.4.1 chs
455 1.11.4.4 chs eof = vp->v_uvm.u_size;
456 1.11.4.4 chs if (ap->a_offset >= eof) {
457 1.11.4.1 chs if ((flags & PGO_LOCKED) == 0) {
458 1.11.4.1 chs simple_unlock(&uobj->vmobjlock);
459 1.11.4.1 chs }
460 1.11.4.4 chs UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
461 1.11.4.4 chs (int)ap->a_offset, *ap->a_count, (int)eof,0);
462 1.11.4.2 chs return EINVAL;
463 1.11.4.1 chs }
464 1.11.4.1 chs
465 1.11.4.1 chs /*
466 1.11.4.1 chs * For PGO_LOCKED requests, just return whatever's in memory.
467 1.11.4.1 chs */
468 1.11.4.1 chs
469 1.11.4.1 chs if (flags & PGO_LOCKED) {
470 1.11.4.1 chs uvn_findpages(uobj, ap->a_offset, ap->a_count, ap->a_m,
471 1.11.4.1 chs UFP_NOWAIT|UFP_NOALLOC|UFP_NORDONLY);
472 1.11.4.1 chs
473 1.11.4.2 chs return ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
474 1.11.4.1 chs }
475 1.11.4.1 chs
476 1.11.4.4 chs if (ap->a_offset + ((*ap->a_count - 1) << PAGE_SHIFT) >= eof) {
477 1.11.4.4 chs panic("genfs_getpages: non LOCKED req past EOF vp %p", vp);
478 1.11.4.4 chs }
479 1.11.4.4 chs
480 1.11.4.1 chs /* vnode is VOP_LOCKed, uobj is locked */
481 1.11.4.1 chs
482 1.11.4.1 chs error = 0;
483 1.11.4.1 chs
484 1.11.4.1 chs /*
485 1.11.4.4 chs * find the requested pages and make some simple checks.
486 1.11.4.4 chs * leave space in the page array for a whole block.
487 1.11.4.1 chs */
488 1.11.4.1 chs
489 1.11.4.4 chs bshift = vp->v_mount->mnt_fs_bshift;
490 1.11.4.4 chs bsize = 1 << bshift;
491 1.11.4.4 chs dev_bshift = vp->v_mount->mnt_dev_bshift;
492 1.11.4.4 chs dev_bsize = 1 << dev_bshift;
493 1.11.4.4 chs
494 1.11.4.4 chs npages = *ap->a_count;
495 1.11.4.4 chs origoffset = ap->a_offset;
496 1.11.4.4 chs startoffset = origoffset & ~((off_t)bsize - 1);
497 1.11.4.4 chs endoffset = round_page((origoffset + (npages << PAGE_SHIFT)
498 1.11.4.4 chs + bsize - 1) & ~((off_t)bsize - 1));
499 1.11.4.4 chs ridx = (origoffset - startoffset) >> PAGE_SHIFT;
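
	/*
	 * Worked example (illustrative; assumes 8KB filesystem blocks and
	 * 4KB pages): a request for 2 pages at origoffset 0x1000 gives
	 * startoffset = 0, endoffset = 0x4000 and ridx = 1, i.e. the caller's
	 * pages sit at pgs[1..2] while the block-aligned range covers
	 * pgs[0..3].
	 */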
500 1.11.4.4 chs
501 1.11.4.4 chs memset(pgs, 0, sizeof(pgs));
502 1.11.4.4 chs uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], UFP_ALL);
503 1.11.4.2 chs
504 1.11.4.1 chs /*
505 1.11.4.4 chs * if PGO_OVERWRITE is set, don't bother reading the pages.
506 1.11.4.2 chs * PGO_OVERWRITE also means that the caller guarantees
507 1.11.4.4 chs * that the pages already have backing store allocated.
508 1.11.4.1 chs */
509 1.11.4.1 chs
510 1.11.4.2 chs if (flags & PGO_OVERWRITE) {
511 1.11.4.2 chs UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
512 1.11.4.1 chs
513 1.11.4.2 chs /* XXX for now, zero the page if we allocated it */
514 1.11.4.4 chs for (i = 0; i < npages; i++) {
515 1.11.4.4 chs struct vm_page *pg = pgs[ridx + i];
516 1.11.4.4 chs if (pg->flags & PG_FAKE) {
517 1.11.4.4 chs uvm_pagezero(pg);
518 1.11.4.4 chs pg->flags &= ~PG_FAKE;
519 1.11.4.4 chs }
520 1.11.4.2 chs }
521 1.11.4.1 chs
522 1.11.4.2 chs simple_unlock(&uobj->vmobjlock);
523 1.11.4.2 chs goto out;
524 1.11.4.2 chs }
525 1.11.4.1 chs
526 1.11.4.1 chs /*
527 1.11.4.4 chs * if the pages are already resident, just return them.
528 1.11.4.1 chs */
529 1.11.4.1 chs
530 1.11.4.4 chs for (i = 0; i < npages; i++) {
531 1.11.4.4 chs struct vm_page *pg = pgs[ridx + i];
532 1.11.4.3 chs
533 1.11.4.4 chs if ((pg->flags & PG_FAKE) != 0 ||
534 1.11.4.4 chs ((ap->a_access_type & VM_PROT_WRITE) &&
535 1.11.4.4 chs (pg->flags & PG_RDONLY))) {
536 1.11.4.4 chs break;
537 1.11.4.4 chs }
538 1.11.4.4 chs }
539 1.11.4.4 chs if (i == npages) {
540 1.11.4.4 chs UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
541 1.11.4.2 chs simple_unlock(&uobj->vmobjlock);
542 1.11.4.3 chs goto out;
543 1.11.4.1 chs }
544 1.11.4.1 chs
545 1.11.4.1 chs /*
546 1.11.4.2 chs	 * at least one page wasn't resident (or wasn't usable as is) and
547 1.11.4.2 chs	 * we're not overwriting, so we're going to have to do some i/o.
548 1.11.4.4 chs * find any additional pages needed to cover the expanded range.
549 1.11.4.1 chs */
550 1.11.4.1 chs
551 1.11.4.4 chs if (startoffset != origoffset) {
552 1.11.4.4 chs UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
553 1.11.4.4 chs (int)startoffset, (int)endoffset, 0,0);
554 1.11.4.4 chs npages = (endoffset - startoffset) >> PAGE_SHIFT;
555 1.11.4.4 chs if (npages == 0) {
556 1.11.4.4 chs panic("XXX getpages npages = 0");
557 1.11.4.4 chs }
558 1.11.4.4 chs npgs = npages;
559 1.11.4.4 chs uvn_findpages(uobj, startoffset, &npgs, pgs, UFP_ALL);
560 1.11.4.1 chs }
561 1.11.4.2 chs simple_unlock(&uobj->vmobjlock);
562 1.11.4.1 chs
563 1.11.4.1 chs /*
564 1.11.4.2 chs * read the desired page(s).
565 1.11.4.1 chs */
566 1.11.4.1 chs
567 1.11.4.2 chs totalbytes = npages << PAGE_SHIFT;
568 1.11.4.4 chs bytes = min(totalbytes,
569 1.11.4.4 chs (vp->v_uvm.u_size - startoffset + dev_bsize - 1) &
570 1.11.4.1 chs ~(dev_bsize - 1));
571 1.11.4.2 chs tailbytes = totalbytes - bytes;
572 1.11.4.2 chs skipbytes = 0;
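
	/*
	 * Worked example (illustrative; 4KB pages, 512-byte device blocks):
	 * with npages = 2 (totalbytes 0x2000), startoffset 0 and EOF at
	 * 0x1234, bytes rounds up to 0x1400 and tailbytes = 0xc00, so the
	 * last 0xc00 bytes of the mapping are zeroed below instead of read.
	 */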
573 1.11.4.1 chs
574 1.11.4.2 chs kva = (void *)uvm_pagermapin(pgs, npages, M_WAITOK);
575 1.11.4.1 chs
576 1.11.4.1 chs s = splbio();
577 1.11.4.1 chs mbp = pool_get(&bufpool, PR_WAITOK);
578 1.11.4.1 chs splx(s);
579 1.11.4.1 chs mbp->b_bufsize = bytes;
580 1.11.4.4 chs mbp->b_data = kva;
581 1.11.4.2 chs mbp->b_resid = mbp->b_bcount = bytes;
582 1.11.4.1 chs mbp->b_flags = B_BUSY|B_READ| (flags & PGO_SYNCIO ? 0 : B_CALL);
583 1.11.4.1 chs mbp->b_iodone = uvm_aio_biodone;
584 1.11.4.1 chs mbp->b_vp = vp;
585 1.11.4.1 chs
586 1.11.4.4 chs /*
587 1.11.4.4 chs * if EOF is in the middle of the last page, zero the part past EOF.
588 1.11.4.4 chs */
589 1.11.4.4 chs
590 1.11.4.4 chs if (tailbytes > 0) {
591 1.11.4.4 chs memset(kva + bytes, 0, tailbytes);
592 1.11.4.4 chs }
593 1.11.4.4 chs
594 1.11.4.4 chs /*
595 1.11.4.4 chs * now loop over the pages, reading as needed.
596 1.11.4.4 chs */
597 1.11.4.4 chs
598 1.11.4.1 chs bp = NULL;
599 1.11.4.4 chs offset = startoffset;
600 1.11.4.1 chs for (; bytes > 0; offset += iobytes, bytes -= iobytes) {
601 1.11.4.2 chs
602 1.11.4.2 chs /*
603 1.11.4.2 chs * skip pages which don't need to be read.
604 1.11.4.2 chs */
605 1.11.4.2 chs
606 1.11.4.2 chs pidx = (offset - startoffset) >> PAGE_SHIFT;
607 1.11.4.2 chs while ((pgs[pidx]->flags & PG_FAKE) == 0) {
608 1.11.4.2 chs size_t b;
609 1.11.4.2 chs
610 1.11.4.2 chs if (offset & (PAGE_SIZE - 1)) {
611 1.11.4.2 chs panic("genfs_getpages: skipping from middle "
612 1.11.4.2 chs "of page");
613 1.11.4.2 chs }
614 1.11.4.2 chs
615 1.11.4.2 chs b = min(PAGE_SIZE, bytes);
616 1.11.4.2 chs offset += b;
617 1.11.4.2 chs bytes -= b;
618 1.11.4.2 chs skipbytes += b;
619 1.11.4.2 chs pidx++;
620 1.11.4.4 chs UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
621 1.11.4.4 chs (int)offset, 0,0,0);
622 1.11.4.2 chs if (bytes == 0) {
623 1.11.4.2 chs goto loopdone;
624 1.11.4.2 chs }
625 1.11.4.2 chs }
626 1.11.4.1 chs
627 1.11.4.1 chs /*
628 1.11.4.1 chs * bmap the file to find out the blkno to read from and
629 1.11.4.4 chs * how much we can read in one i/o. if bmap returns an error,
630 1.11.4.4 chs * skip the rest of the top-level i/o.
631 1.11.4.1 chs */
632 1.11.4.1 chs
633 1.11.4.2 chs lbn = offset >> bshift;
634 1.11.4.1 chs error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
635 1.11.4.1 chs if (error) {
636 1.11.4.1 chs UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
637 1.11.4.1 chs lbn, error,0,0);
638 1.11.4.4 chs skipbytes += bytes;
639 1.11.4.4 chs tailbytes = 0;
640 1.11.4.4 chs goto loopdone;
641 1.11.4.1 chs }
642 1.11.4.1 chs
643 1.11.4.2 chs /*
644 1.11.4.4 chs * see how many pages can be read with this i/o.
645 1.11.4.2 chs * reduce the i/o size if necessary.
646 1.11.4.2 chs */
647 1.11.4.1 chs
648 1.11.4.2 chs iobytes = min(((lbn + 1 + run) << bshift) - offset, bytes);
649 1.11.4.2 chs if (offset + iobytes > round_page(offset)) {
650 1.11.4.2 chs pcount = 1;
651 1.11.4.2 chs while (pidx + pcount < npages &&
652 1.11.4.2 chs pgs[pidx + pcount]->flags & PG_FAKE) {
653 1.11.4.2 chs pcount++;
654 1.11.4.1 chs }
655 1.11.4.2 chs iobytes = min(iobytes, (pcount << PAGE_SHIFT) -
656 1.11.4.2 chs (offset - trunc_page(offset)));
657 1.11.4.2 chs }
658 1.11.4.1 chs
659 1.11.4.2 chs /*
660 1.11.4.2 chs * if this block isn't allocated, zero it instead of reading it.
661 1.11.4.4 chs * if this is a read access, mark the pages we zeroed PG_RDONLY.
662 1.11.4.2 chs */
663 1.11.4.1 chs
664 1.11.4.7 perseant if (blkno < 0) {
665 1.11.4.2 chs UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
666 1.11.4.1 chs
667 1.11.4.2 chs sawhole = TRUE;
668 1.11.4.4 chs memset(kva + (offset - startoffset), 0, iobytes);
669 1.11.4.4 chs
670 1.11.4.4 chs if (ap->a_access_type == VM_PROT_READ) {
671 1.11.4.4 chs int holepages =
672 1.11.4.4 chs (round_page(offset + iobytes) -
673 1.11.4.4 chs trunc_page(offset)) >> PAGE_SHIFT;
674 1.11.4.4 chs for (i = 0; i < holepages; i++) {
675 1.11.4.4 chs pgs[pidx + i]->flags |= PG_RDONLY;
676 1.11.4.4 chs }
677 1.11.4.4 chs }
678 1.11.4.1 chs continue;
679 1.11.4.1 chs }
680 1.11.4.1 chs
681 1.11.4.1 chs /*
682 1.11.4.1 chs * allocate a sub-buf for this piece of the i/o
683 1.11.4.1 chs * (or just use mbp if there's only 1 piece),
684 1.11.4.1 chs * and start it going.
685 1.11.4.1 chs */
686 1.11.4.1 chs
687 1.11.4.4 chs if (offset == startoffset && iobytes == bytes) {
688 1.11.4.1 chs bp = mbp;
689 1.11.4.1 chs } else {
690 1.11.4.1 chs s = splbio();
691 1.11.4.1 chs bp = pool_get(&bufpool, PR_WAITOK);
692 1.11.4.1 chs splx(s);
693 1.11.4.4 chs bp->b_data = kva + offset - startoffset;
694 1.11.4.2 chs bp->b_resid = bp->b_bcount = iobytes;
695 1.11.4.1 chs bp->b_flags = B_BUSY|B_READ|B_CALL;
696 1.11.4.1 chs bp->b_iodone = uvm_aio_biodone1;
697 1.11.4.1 chs bp->b_vp = vp;
698 1.11.4.1 chs }
699 1.11.4.1 chs bp->b_lblkno = 0;
700 1.11.4.1 chs bp->b_private = mbp;
701 1.11.4.1 chs
702 1.11.4.1 chs /* adjust physical blkno for partial blocks */
703 1.11.4.1 chs bp->b_blkno = blkno + ((offset - (lbn << bshift)) >>
704 1.11.4.1 chs dev_bshift);
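
		/*
		 * e.g. with 8KB filesystem blocks and 512-byte device blocks,
		 * starting 0x1000 bytes into the block adds 0x1000 >> 9 = 8
		 * to the device block number returned by VOP_BMAP.
		 */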
705 1.11.4.2 chs
706 1.11.4.2 chs UVMHIST_LOG(ubchist, "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
707 1.11.4.2 chs bp, (int)offset, (int)iobytes, bp->b_blkno);
708 1.11.4.1 chs
709 1.11.4.1 chs VOP_STRATEGY(bp);
710 1.11.4.1 chs }
711 1.11.4.2 chs
712 1.11.4.4 chs loopdone:
713 1.11.4.4 chs s = splbio();
714 1.11.4.2 chs if (skipbytes) {
715 1.11.4.2 chs mbp->b_resid -= skipbytes;
716 1.11.4.2 chs if (mbp->b_resid == 0) {
717 1.11.4.2 chs biodone(mbp);
718 1.11.4.2 chs }
719 1.11.4.2 chs }
720 1.11.4.4 chs splx(s);
721 1.11.4.1 chs if ((flags & PGO_SYNCIO) == 0) {
722 1.11.4.2 chs UVMHIST_LOG(ubchist, "returning PEND",0,0,0,0);
723 1.11.4.2 chs return EINPROGRESS;
724 1.11.4.1 chs }
725 1.11.4.1 chs if (bp != NULL) {
726 1.11.4.1 chs error = biowait(mbp);
727 1.11.4.1 chs }
728 1.11.4.1 chs s = splbio();
729 1.11.4.1 chs pool_put(&bufpool, mbp);
730 1.11.4.1 chs splx(s);
731 1.11.4.4 chs for (i = 0; i < npages; i++) {
732 1.11.4.4 chs UVMHIST_LOG(ubchist, "pgs[%d][0] = 0x%x",
733 1.11.4.4 chs i, *(int *)(kva + (i << PAGE_SHIFT)), 0,0);
734 1.11.4.4 chs }
735 1.11.4.2 chs uvm_pagermapout((vaddr_t)kva, npages);
736 1.11.4.2 chs
737 1.11.4.2 chs /*
738 1.11.4.2 chs	 * if we encountered a hole then we have to do a little more work.
739 1.11.4.2 chs * for read faults, we must mark the page PG_RDONLY so that future
740 1.11.4.2 chs * write accesses to the page will fault again.
741 1.11.4.2 chs * for write faults, we must make sure that the backing store for
742 1.11.4.2 chs * the page is completely allocated.
743 1.11.4.2 chs */
744 1.11.4.2 chs
745 1.11.4.4 chs if (sawhole && ap->a_access_type == VM_PROT_WRITE) {
746 1.11.4.4 chs error = VOP_BALLOC(vp, startoffset, npages << PAGE_SHIFT,
747 1.11.4.4 chs cred, 0);
748 1.11.4.4 chs if (error) {
749 1.11.4.4 chs UVMHIST_LOG(ubchist, "balloc lbn 0x%x -> %d",
750 1.11.4.4 chs lbn, error,0,0);
751 1.11.4.4 chs goto out;
752 1.11.4.2 chs }
753 1.11.4.2 chs }
754 1.11.4.2 chs
755 1.11.4.2 chs /*
756 1.11.4.2 chs * see if we want to start any readahead.
757 1.11.4.2 chs * XXX writeme
758 1.11.4.2 chs */
759 1.11.4.2 chs
760 1.11.4.2 chs /*
761 1.11.4.2 chs * we're almost done! release the pages...
762 1.11.4.2 chs * for errors, we free the pages.
763 1.11.4.2 chs * otherwise we activate them and mark them as valid and clean.
764 1.11.4.2 chs * also, unbusy all but the center page.
765 1.11.4.2 chs */
766 1.11.4.1 chs
767 1.11.4.1 chs out:
768 1.11.4.1 chs if (error) {
769 1.11.4.1 chs simple_lock(&uobj->vmobjlock);
770 1.11.4.2 chs for (i = 0; i < npages; i++) {
771 1.11.4.2 chs UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
772 1.11.4.2 chs pgs[i], pgs[i]->flags, 0,0);
773 1.11.4.2 chs if (pgs[i]->flags & PG_FAKE) {
774 1.11.4.2 chs if (pgs[i]->flags & PG_WANTED) {
775 1.11.4.2 chs wakeup(pgs[i]);
776 1.11.4.2 chs }
777 1.11.4.2 chs uvm_pagefree(pgs[i]);
778 1.11.4.2 chs }
779 1.11.4.1 chs }
780 1.11.4.1 chs simple_unlock(&uobj->vmobjlock);
781 1.11.4.2 chs UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
782 1.11.4.2 chs return error;
783 1.11.4.1 chs }
784 1.11.4.2 chs
785 1.11.4.2 chs UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
786 1.11.4.2 chs simple_lock(&uobj->vmobjlock);
787 1.11.4.2 chs for (i = 0; i < npages; i++) {
788 1.11.4.4 chs if (pgs[i] == NULL) {
789 1.11.4.4 chs continue;
790 1.11.4.4 chs }
791 1.11.4.2 chs UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
792 1.11.4.2 chs pgs[i], pgs[i]->flags, 0,0);
793 1.11.4.2 chs if (pgs[i]->flags & PG_FAKE) {
794 1.11.4.2 chs UVMHIST_LOG(ubchist, "unfaking pg %p offset 0x%x",
795 1.11.4.2 chs pgs[i], (int)pgs[i]->offset,0,0);
796 1.11.4.2 chs pgs[i]->flags &= ~(PG_FAKE);
797 1.11.4.2 chs pmap_clear_modify(PMAP_PGARG(pgs[i]));
798 1.11.4.2 chs pmap_clear_reference(PMAP_PGARG(pgs[i]));
799 1.11.4.2 chs }
800 1.11.4.4 chs if (i < ridx || i >= ridx + *ap->a_count) {
801 1.11.4.2 chs UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
802 1.11.4.2 chs pgs[i], (int)pgs[i]->offset,0,0);
803 1.11.4.2 chs /*
804 1.11.4.2 chs KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
805 1.11.4.2 chs */
806 1.11.4.2 chs
807 1.11.4.2 chs if (pgs[i]->flags & PG_WANTED) {
808 1.11.4.2 chs wakeup(pgs[i]);
809 1.11.4.2 chs }
810 1.11.4.2 chs pgs[i]->flags &= ~(PG_WANTED|PG_BUSY);
811 1.11.4.2 chs UVM_PAGE_OWN(pgs[i], NULL);
812 1.11.4.2 chs }
813 1.11.4.2 chs }
814 1.11.4.2 chs simple_unlock(&uobj->vmobjlock);
815 1.11.4.4 chs memcpy(ap->a_m, &pgs[ridx], *ap->a_count * sizeof(struct vm_page *));
816 1.11.4.2 chs return 0;
817 1.11.4.1 chs }
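
/*
 * Example (sketch): callers such as the ubc fault path reach this through
 * VOP_GETPAGES with the object locked; e.g. to read one page synchronously:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	simple_lock(&vp->v_uvm.u_obj.vmobjlock);
 *	error = VOP_GETPAGES(vp, trunc_page(offset), &pg, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_SYNCIO);
 *
 * On success the page comes back PG_BUSY and the caller unbusies it when it
 * is done with it.
 */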
818 1.11.4.1 chs
819 1.11.4.1 chs /*
820 1.11.4.1 chs * generic VM putpages routine.
821 1.11.4.1 chs * Write the given range of pages to backing store.
822 1.11.4.1 chs */
823 1.11.4.1 chs int
824 1.11.4.1 chs genfs_putpages(v)
825 1.11.4.1 chs void *v;
826 1.11.4.1 chs {
827 1.11.4.1 chs struct vop_putpages_args /* {
828 1.11.4.1 chs struct vnode *a_vp;
829 1.11.4.1 chs struct vm_page **a_m;
830 1.11.4.1 chs int a_count;
831 1.11.4.7 perseant int a_flags;
832 1.11.4.1 chs int *a_rtvals;
833 1.11.4.1 chs } */ *ap = v;
834 1.11.4.1 chs
835 1.11.4.4 chs int s, error, npages, bshift, dev_bshift, dev_bsize, run;
836 1.11.4.4 chs	char *kva;
837 1.11.4.4 chs off_t offset, startoffset;
838 1.11.4.4 chs size_t bytes, iobytes, skipbytes;
839 1.11.4.1 chs daddr_t lbn, blkno;
840 1.11.4.1 chs struct vm_page *pg;
841 1.11.4.1 chs struct buf *mbp, *bp;
842 1.11.4.1 chs struct vnode *vp = ap->a_vp;
843 1.11.4.1 chs UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
844 1.11.4.1 chs
845 1.11.4.2 chs error = 0;
846 1.11.4.4 chs npages = ap->a_count;
847 1.11.4.1 chs bshift = vp->v_mount->mnt_fs_bshift;
848 1.11.4.1 chs dev_bshift = vp->v_mount->mnt_dev_bshift;
849 1.11.4.1 chs dev_bsize = 1 << dev_bshift;
850 1.11.4.1 chs
851 1.11.4.1 chs pg = ap->a_m[0];
852 1.11.4.4 chs startoffset = pg->offset;
853 1.11.4.4 chs bytes = min(npages << PAGE_SHIFT,
854 1.11.4.4 chs (vp->v_uvm.u_size - startoffset + dev_bsize - 1) &
855 1.11.4.4 chs ~((off_t)dev_bsize - 1));
856 1.11.4.4 chs skipbytes = 0;
857 1.11.4.1 chs
858 1.11.4.4 chs if (bytes == 0) {
859 1.11.4.4 chs panic("genfs_putpages: bytes == 0??? vp %p", vp);
860 1.11.4.4 chs }
861 1.11.4.4 chs
862 1.11.4.4 chs kva = (void *)uvm_pagermapin(ap->a_m, npages, M_WAITOK);
863 1.11.4.2 chs
864 1.11.4.1 chs s = splbio();
865 1.11.4.1 chs vp->v_numoutput++;
866 1.11.4.1 chs mbp = pool_get(&bufpool, PR_WAITOK);
867 1.11.4.4 chs UVMHIST_LOG(ubchist, "master vp %p bp %p num now %d",
868 1.11.4.4 chs vp, mbp, vp->v_numoutput, 0);
869 1.11.4.1 chs splx(s);
870 1.11.4.4 chs mbp->b_bufsize = npages << PAGE_SHIFT;
871 1.11.4.4 chs mbp->b_data = kva;
872 1.11.4.2 chs mbp->b_resid = mbp->b_bcount = bytes;
873 1.11.4.7 perseant mbp->b_flags = B_BUSY|B_WRITE| ((ap->a_flags & PGO_SYNCIO) ? 0 : B_CALL) |
874 1.11.4.1 chs (curproc == uvm.pagedaemon_proc ? B_PDAEMON : 0);
875 1.11.4.1 chs mbp->b_iodone = uvm_aio_biodone;
876 1.11.4.1 chs mbp->b_vp = vp;
877 1.11.4.1 chs
878 1.11.4.1 chs bp = NULL;
879 1.11.4.4 chs offset = startoffset;
880 1.11.4.1 chs for (; bytes > 0; offset += iobytes, bytes -= iobytes) {
881 1.11.4.1 chs lbn = offset >> bshift;
882 1.11.4.1 chs error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
883 1.11.4.1 chs if (error) {
884 1.11.4.4 chs UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
885 1.11.4.1 chs goto errout;
886 1.11.4.1 chs }
887 1.11.4.1 chs
888 1.11.4.4 chs iobytes = min(((lbn + 1 + run) << bshift) - offset, bytes);
889 1.11.4.1 chs if (blkno == (daddr_t)-1) {
890 1.11.4.4 chs skipbytes += iobytes;
891 1.11.4.4 chs continue;
892 1.11.4.1 chs }
893 1.11.4.1 chs
894 1.11.4.1 chs /* if it's really one i/o, don't make a second buf */
895 1.11.4.4 chs if (offset == startoffset && iobytes == bytes) {
896 1.11.4.1 chs bp = mbp;
897 1.11.4.1 chs } else {
898 1.11.4.1 chs s = splbio();
899 1.11.4.1 chs vp->v_numoutput++;
900 1.11.4.1 chs bp = pool_get(&bufpool, PR_WAITOK);
901 1.11.4.4 chs UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
902 1.11.4.4 chs vp, bp, vp->v_numoutput, 0);
903 1.11.4.1 chs splx(s);
904 1.11.4.4 chs bp->b_data = kva + offset - pg->offset;
905 1.11.4.2 chs bp->b_resid = bp->b_bcount = iobytes;
906 1.11.4.1 chs bp->b_flags = B_BUSY|B_WRITE|B_CALL;
907 1.11.4.1 chs bp->b_iodone = uvm_aio_biodone1;
908 1.11.4.1 chs bp->b_vp = vp;
909 1.11.4.1 chs }
910 1.11.4.1 chs bp->b_lblkno = 0;
911 1.11.4.1 chs bp->b_private = mbp;
912 1.11.4.1 chs
913 1.11.4.1 chs /* adjust physical blkno for partial blocks */
914 1.11.4.1 chs bp->b_blkno = blkno + ((offset - (lbn << bshift)) >>
915 1.11.4.1 chs dev_bshift);
916 1.11.4.2 chs UVMHIST_LOG(ubchist, "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
917 1.11.4.4 chs vp, (int)offset, (int)bp->b_bcount,
918 1.11.4.4 chs (int)bp->b_blkno);
919 1.11.4.1 chs VOP_STRATEGY(bp);
920 1.11.4.1 chs }
921 1.11.4.4 chs s = splbio();
922 1.11.4.4 chs if (skipbytes) {
923 1.11.4.4 chs mbp->b_resid -= skipbytes;
924 1.11.4.4 chs if (mbp->b_resid == 0) {
925 1.11.4.4 chs biodone(mbp);
926 1.11.4.4 chs }
927 1.11.4.4 chs }
928 1.11.4.4 chs splx(s);
929 1.11.4.7 perseant if (!(ap->a_flags & PGO_SYNCIO)) {
930 1.11.4.2 chs return EINPROGRESS;
931 1.11.4.1 chs }
932 1.11.4.1 chs
933 1.11.4.1 chs errout:
934 1.11.4.1 chs if (bp != NULL) {
935 1.11.4.1 chs error = biowait(mbp);
936 1.11.4.1 chs }
937 1.11.4.1 chs s = splbio();
938 1.11.4.1 chs pool_put(&bufpool, mbp);
939 1.11.4.1 chs splx(s);
940 1.11.4.4 chs uvm_pagermapout((vaddr_t)kva, npages);
941 1.11.4.1 chs UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
942 1.11.4.2 chs return error;
943 1.1 mycroft }
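
/*
 * Example (sketch): the pager's flush path hands a run of busy pages to this
 * routine through VOP_PUTPAGES, e.g.
 *
 *	error = VOP_PUTPAGES(vp, pgs, npages, PGO_SYNCIO, NULL);
 *
 * (a_rtvals is unused by this implementation).  With PGO_SYNCIO the call
 * waits for the write to finish; without it EINPROGRESS is returned and
 * uvm_aio_biodone completes the pages later.
 */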