/*	$NetBSD: lfs_vnops.c,v 1.220 2009/02/22 20:28:07 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1986, 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_vnops.c	8.13 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.220 2009/02/22 20:28:07 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/signalvar.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <sys/fstrans.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_stat.h>
#include <uvm/uvm_pager.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

extern pid_t lfs_writer_daemon;
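
/*
 * If nonzero (the default), FSYNC_LAZY requests are ignored rather than
 * queued for the writer daemon; see the trickle sync code in lfs_fsync().
 */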
int lfs_ignore_lazy_sync = 1;

/* Global vfs data structures for lfs. */
int (**lfs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, ufs_lookup },		/* lookup */
	{ &vop_create_desc, lfs_create },		/* create */
	{ &vop_whiteout_desc, ufs_whiteout },		/* whiteout */
	{ &vop_mknod_desc, lfs_mknod },			/* mknod */
	{ &vop_open_desc, ufs_open },			/* open */
	{ &vop_close_desc, lfs_close },			/* close */
	{ &vop_access_desc, ufs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, lfs_read },			/* read */
	{ &vop_write_desc, lfs_write },			/* write */
	{ &vop_ioctl_desc, ufs_ioctl },			/* ioctl */
	{ &vop_fcntl_desc, lfs_fcntl },			/* fcntl */
	{ &vop_poll_desc, ufs_poll },			/* poll */
	{ &vop_kqfilter_desc, genfs_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, ufs_revoke },		/* revoke */
	{ &vop_mmap_desc, lfs_mmap },			/* mmap */
	{ &vop_fsync_desc, lfs_fsync },			/* fsync */
	{ &vop_seek_desc, ufs_seek },			/* seek */
	{ &vop_remove_desc, lfs_remove },		/* remove */
	{ &vop_link_desc, lfs_link },			/* link */
	{ &vop_rename_desc, lfs_rename },		/* rename */
	{ &vop_mkdir_desc, lfs_mkdir },			/* mkdir */
	{ &vop_rmdir_desc, lfs_rmdir },			/* rmdir */
	{ &vop_symlink_desc, lfs_symlink },		/* symlink */
	{ &vop_readdir_desc, ufs_readdir },		/* readdir */
	{ &vop_readlink_desc, ufs_readlink },		/* readlink */
	{ &vop_abortop_desc, ufs_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ufs_lock },			/* lock */
	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
	{ &vop_bmap_desc, ufs_bmap },			/* bmap */
	{ &vop_strategy_desc, lfs_strategy },		/* strategy */
	{ &vop_print_desc, ufs_print },			/* print */
	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
	{ &vop_pathconf_desc, ufs_pathconf },		/* pathconf */
	{ &vop_advlock_desc, ufs_advlock },		/* advlock */
	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
	{ &vop_getpages_desc, lfs_getpages },		/* getpages */
	{ &vop_putpages_desc, lfs_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_vnodeop_opv_desc =
	{ &lfs_vnodeop_p, lfs_vnodeop_entries };

int (**lfs_specop_p)(void *);
const struct vnodeopv_entry_desc lfs_specop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, lfsspec_close },		/* close */
	{ &vop_access_desc, ufs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, ufsspec_read },		/* read */
	{ &vop_write_desc, ufsspec_write },		/* write */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_fcntl_desc, ufs_fcntl },			/* fcntl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ufs_lock },			/* lock */
	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, ufs_print },			/* print */
	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_specop_opv_desc =
	{ &lfs_specop_p, lfs_specop_entries };

int (**lfs_fifoop_p)(void *);
const struct vnodeopv_entry_desc lfs_fifoop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, fifo_lookup },		/* lookup */
	{ &vop_create_desc, fifo_create },		/* create */
	{ &vop_mknod_desc, fifo_mknod },		/* mknod */
	{ &vop_open_desc, fifo_open },			/* open */
	{ &vop_close_desc, lfsfifo_close },		/* close */
	{ &vop_access_desc, ufs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, ufsfifo_read },		/* read */
	{ &vop_write_desc, ufsfifo_write },		/* write */
	{ &vop_ioctl_desc, fifo_ioctl },		/* ioctl */
	{ &vop_fcntl_desc, ufs_fcntl },			/* fcntl */
	{ &vop_poll_desc, fifo_poll },			/* poll */
	{ &vop_kqfilter_desc, fifo_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, fifo_revoke },		/* revoke */
	{ &vop_mmap_desc, fifo_mmap },			/* mmap */
	{ &vop_fsync_desc, fifo_fsync },		/* fsync */
	{ &vop_seek_desc, fifo_seek },			/* seek */
	{ &vop_remove_desc, fifo_remove },		/* remove */
	{ &vop_link_desc, fifo_link },			/* link */
	{ &vop_rename_desc, fifo_rename },		/* rename */
	{ &vop_mkdir_desc, fifo_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, fifo_rmdir },		/* rmdir */
	{ &vop_symlink_desc, fifo_symlink },		/* symlink */
	{ &vop_readdir_desc, fifo_readdir },		/* readdir */
	{ &vop_readlink_desc, fifo_readlink },		/* readlink */
	{ &vop_abortop_desc, fifo_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ufs_lock },			/* lock */
	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
	{ &vop_bmap_desc, fifo_bmap },			/* bmap */
	{ &vop_strategy_desc, fifo_strategy },		/* strategy */
	{ &vop_print_desc, ufs_print },			/* print */
	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
	{ &vop_pathconf_desc, fifo_pathconf },		/* pathconf */
	{ &vop_advlock_desc, fifo_advlock },		/* advlock */
	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
	{ &vop_putpages_desc, fifo_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_fifoop_opv_desc =
	{ &lfs_fifoop_p, lfs_fifoop_entries };

static int check_dirty(struct lfs *, struct vnode *, off_t, off_t,
    off_t, int, int, struct vm_page **);

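/*
 * Generate the LFS flavors of the read and write vnode operations
 * (lfs_read/lfs_write) by including the shared UFS implementation
 * with LFS_READWRITE defined; that macro selects the LFS-specific
 * paths within ufs_readwrite.c.
 */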
#define	LFS_READWRITE
#include <ufs/ufs/ufs_readwrite.c>
#undef	LFS_READWRITE

/*
 * Synch an open file.
 */
/* ARGSUSED */
int
lfs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error, wait;
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	/* If we're mounted read-only, don't try to sync. */
	if (fs->lfs_ronly)
		return 0;

	/*
	 * Trickle sync simply adds this vnode to the pager list, as if
	 * the pagedaemon had requested a pageout.
	 */
	if (ap->a_flags & FSYNC_LAZY) {
		if (lfs_ignore_lazy_sync == 0) {
			mutex_enter(&lfs_lock);
			if (!(ip->i_flag & IN_PAGING)) {
				ip->i_flag |= IN_PAGING;
				TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip,
				    i_lfs_pchain);
			}
			wakeup(&lfs_writer_daemon);
			mutex_exit(&lfs_lock);
		}
		return 0;
	}

	/*
	 * If a vnode is being cleaned, flush it out before we try to
	 * reuse it. This prevents the cleaner from writing files twice
	 * in the same partial segment, causing an accounting underflow.
	 */
	if (ap->a_flags & FSYNC_RECLAIM && ip->i_flag & IN_CLEANING) {
		lfs_vflush(vp);
	}

	wait = (ap->a_flags & FSYNC_WAIT);
	do {
		mutex_enter(&vp->v_interlock);
		error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
		    round_page(ap->a_offhi),
		    PGO_CLEANIT | (wait ? PGO_SYNCIO : 0));
		if (error == EAGAIN) {
			mutex_enter(&lfs_lock);
			mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_fsync",
			    hz / 100 + 1, &lfs_lock);
			mutex_exit(&lfs_lock);
		}
	} while (error == EAGAIN);
	if (error)
		return error;

	if ((ap->a_flags & FSYNC_DATAONLY) == 0)
		error = lfs_update(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);

	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
		int l = 0;
		error = VOP_IOCTL(ip->i_devvp, DIOCCACHESYNC, &l, FWRITE,
		    curlwp->l_cred);
	}
	if (wait && !VPISEMPTY(vp))
		LFS_SET_UINO(ip, IN_MODIFIED);

	return error;
}

/*
 * Take IN_ADIROP off, then call ufs_inactive.
 */
int
lfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	lfs_unmark_vnode(ap->a_vp);

	/*
	 * The Ifile is only ever inactivated on unmount.
	 * Streamline this process by not giving it more dirty blocks.
	 */
	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM) {
		mutex_enter(&lfs_lock);
		LFS_CLR_UINO(VTOI(ap->a_vp), IN_ALLMOD);
		mutex_exit(&lfs_lock);
		VOP_UNLOCK(ap->a_vp, 0);
		return 0;
	}

	return ufs_inactive(v);
}

/*
 * These macros are used to bracket UFS directory ops, so that we can
 * identify all the pages touched during directory ops which need to
 * be ordered and flushed atomically, so that they may be recovered.
 *
 * Because we have to mark nodes VU_DIROP in order to prevent
 * the cache from reclaiming them while a dirop is in progress, we must
 * also manage the number of nodes so marked (otherwise we can run out).
 * We do this by setting lfs_dirvcount to the number of marked vnodes; it
 * is decremented during segment write, when VU_DIROP is taken off.
 */
#define	MARK_VNODE(vp)			lfs_mark_vnode(vp)
#define	UNMARK_VNODE(vp)		lfs_unmark_vnode(vp)
#define	SET_DIROP_CREATE(dvp, vpp)	lfs_set_dirop_create((dvp), (vpp))
#define	SET_DIROP_REMOVE(dvp, vp)	lfs_set_dirop((dvp), (vp))
static int lfs_set_dirop_create(struct vnode *, struct vnode **);
static int lfs_set_dirop(struct vnode *, struct vnode *);
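
/*
 * The directory operations below all follow the same bracketing
 * pattern, e.g. (taken from lfs_create() below):
 *
 *	if ((error = SET_DIROP_CREATE(dvp, vpp)) != 0) {
 *		vput(dvp);
 *		return error;
 *	}
 *	error = ufs_create(ap);
 *	SET_ENDOP_CREATE_AP(ap, "create");
 */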

static int
lfs_set_dirop(struct vnode *dvp, struct vnode *vp)
{
	struct lfs *fs;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(vp == NULL || VOP_ISLOCKED(vp));

	fs = VTOI(dvp)->i_lfs;

	ASSERT_NO_SEGLOCK(fs);
	/*
	 * LFS_NRESERVE calculates direct and indirect blocks as well
	 * as an inode block; an overestimate in most cases.
	 */
	if ((error = lfs_reserve(fs, dvp, vp, LFS_NRESERVE(fs))) != 0)
		return (error);

restart:
	mutex_enter(&lfs_lock);
	if (fs->lfs_dirops == 0) {
		mutex_exit(&lfs_lock);
		lfs_check(dvp, LFS_UNUSED_LBN, 0);
		mutex_enter(&lfs_lock);
	}
	while (fs->lfs_writer) {
		error = mtsleep(&fs->lfs_dirops, (PRIBIO + 1) | PCATCH,
		    "lfs_sdirop", 0, &lfs_lock);
		if (error == EINTR) {
			mutex_exit(&lfs_lock);
			goto unreserve;
		}
	}
	if (lfs_dirvcount > LFS_MAX_DIROP && fs->lfs_dirops == 0) {
		wakeup(&lfs_writer_daemon);
		mutex_exit(&lfs_lock);
		preempt();
		goto restart;
	}

	if (lfs_dirvcount > LFS_MAX_DIROP) {
		mutex_exit(&lfs_lock);
		DLOG((DLOG_DIROP, "lfs_set_dirop: sleeping with dirops=%d, "
		    "dirvcount=%d\n", fs->lfs_dirops, lfs_dirvcount));
		if ((error = mtsleep(&lfs_dirvcount,
		    PCATCH | PUSER | PNORELOCK, "lfs_maxdirop", 0,
		    &lfs_lock)) != 0) {
			goto unreserve;
		}
		goto restart;
	}

	++fs->lfs_dirops;
	fs->lfs_doifile = 1;
	mutex_exit(&lfs_lock);

	/* Hold a reference so SET_ENDOP will be happy */
	vref(dvp);
	if (vp) {
		vref(vp);
		MARK_VNODE(vp);
	}

	MARK_VNODE(dvp);
	return 0;

unreserve:
	lfs_reserve(fs, dvp, vp, -LFS_NRESERVE(fs));
	return error;
}

/*
 * Get a new vnode *before* adjusting the dirop count, to avoid a deadlock
 * in getnewvnode(), if we have a stacked filesystem mounted on top
 * of us.
 *
 * NB: this means we have to clear the new vnodes on error.  Fortunately
 * SET_ENDOP is there to do that for us.
 */
static int
lfs_set_dirop_create(struct vnode *dvp, struct vnode **vpp)
{
	int error;
	struct lfs *fs;

	fs = VFSTOUFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly)
		return EROFS;
	if (vpp && (error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, vpp))) {
		DLOG((DLOG_ALLOC, "lfs_set_dirop_create: dvp %p error %d\n",
		    dvp, error));
		return error;
	}
	if ((error = lfs_set_dirop(dvp, NULL)) != 0) {
		if (vpp) {
			ungetnewvnode(*vpp);
			*vpp = NULL;
		}
		return error;
	}
	return 0;
}

#define	SET_ENDOP_BASE(fs, dvp, str)					\
	do {								\
		mutex_enter(&lfs_lock);					\
		--(fs)->lfs_dirops;					\
		if (!(fs)->lfs_dirops) {				\
			if ((fs)->lfs_nadirop) {			\
				panic("SET_ENDOP: %s: no dirops but "	\
				      " nadirop=%d", (str),		\
				      (fs)->lfs_nadirop);		\
			}						\
			wakeup(&(fs)->lfs_writer);			\
			mutex_exit(&lfs_lock);				\
			lfs_check((dvp), LFS_UNUSED_LBN, 0);		\
		} else							\
			mutex_exit(&lfs_lock);				\
	} while(0)
#define SET_ENDOP_CREATE(fs, dvp, nvpp, str)				\
	do {								\
		UNMARK_VNODE(dvp);					\
		if (nvpp && *nvpp)					\
			UNMARK_VNODE(*nvpp);				\
		/* Check for error return to stem vnode leakage */	\
		if (nvpp && *nvpp && !((*nvpp)->v_uflag & VU_DIROP))	\
			ungetnewvnode(*(nvpp));				\
		SET_ENDOP_BASE((fs), (dvp), (str));			\
		lfs_reserve((fs), (dvp), NULL, -LFS_NRESERVE(fs));	\
		vrele(dvp);						\
	} while(0)
#define SET_ENDOP_CREATE_AP(ap, str)					\
	SET_ENDOP_CREATE(VTOI((ap)->a_dvp)->i_lfs, (ap)->a_dvp,		\
			 (ap)->a_vpp, (str))
#define SET_ENDOP_REMOVE(fs, dvp, ovp, str)				\
	do {								\
		UNMARK_VNODE(dvp);					\
		if (ovp)						\
			UNMARK_VNODE(ovp);				\
		SET_ENDOP_BASE((fs), (dvp), (str));			\
		lfs_reserve((fs), (dvp), (ovp), -LFS_NRESERVE(fs));	\
		vrele(dvp);						\
		if (ovp)						\
			vrele(ovp);					\
	} while(0)

void
lfs_mark_vnode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	mutex_enter(&lfs_lock);
	if (!(ip->i_flag & IN_ADIROP)) {
		if (!(vp->v_uflag & VU_DIROP)) {
			mutex_enter(&vp->v_interlock);
			(void)lfs_vref(vp);
			++lfs_dirvcount;
			++fs->lfs_dirvcount;
			TAILQ_INSERT_TAIL(&fs->lfs_dchainhd, ip, i_lfs_dchain);
			vp->v_uflag |= VU_DIROP;
		}
		++fs->lfs_nadirop;
		ip->i_flag |= IN_ADIROP;
	} else
		KASSERT(vp->v_uflag & VU_DIROP);
	mutex_exit(&lfs_lock);
}

void
lfs_unmark_vnode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	if (ip && (ip->i_flag & IN_ADIROP)) {
		KASSERT(vp->v_uflag & VU_DIROP);
		mutex_enter(&lfs_lock);
		--ip->i_lfs->lfs_nadirop;
		mutex_exit(&lfs_lock);
		ip->i_flag &= ~IN_ADIROP;
	}
}
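
/*
 * Note that lfs_unmark_vnode() only drops the IN_ADIROP state; the
 * VU_DIROP flag and the dirvcount accounting are not released until
 * segment write time, as described in the comment above MARK_VNODE.
 */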

int
lfs_symlink(void *v)
{
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap = v;
	int error;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_symlink(ap);
	SET_ENDOP_CREATE_AP(ap, "symlink");
	return (error);
}

int
lfs_mknod(void *v)
{
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vattr *vap = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	struct inode *ip;
	int error;
	struct mount *mp;
	ino_t ino;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
	    ap->a_dvp, vpp, ap->a_cnp);

	/* Either way we're done with the dirop at this point */
	SET_ENDOP_CREATE_AP(ap, "mknod");

	if (error)
		return (error);

	ip = VTOI(*vpp);
	mp = (*vpp)->v_mount;
	ino = ip->i_number;
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	if (vap->va_rdev != VNOVAL) {
		/*
		 * Want to be able to use this to make badblock
		 * inodes, so don't truncate the dev number.
		 */
#if 0
		ip->i_ffs1_rdev = ufs_rw32(vap->va_rdev,
		    UFS_MPNEEDSWAP((*vpp)->v_mount));
#else
		ip->i_ffs1_rdev = vap->va_rdev;
#endif
	}

	/*
	 * Call fsync to write the vnode so that we don't have to deal with
	 * flushing it when it's marked VU_DIROP|VI_XLOCK.
	 *
	 * XXX KS - If we can't flush we also can't call vgone(), so must
	 * return.  But, that leaves this vnode in limbo, also not good.
	 * Can this ever happen (barring hardware failure)?
	 */
	if ((error = VOP_FSYNC(*vpp, NOCRED, FSYNC_WAIT, 0, 0)) != 0) {
		panic("lfs_mknod: couldn't fsync (ino %llu)",
		    (unsigned long long)ino);
		/* return (error); */
	}
	/*
	 * Remove vnode so that it will be reloaded by VFS_VGET and
	 * checked to see if it is an alias of an existing entry in
	 * the inode cache.
	 */
	/* Used to be vput, but that causes us to call VOP_INACTIVE twice. */

	VOP_UNLOCK(*vpp, 0);
	(*vpp)->v_type = VNON;
	vgone(*vpp);
	error = VFS_VGET(mp, ino, vpp);

	if (error != 0) {
		*vpp = NULL;
		return (error);
	}
	return (0);
}

int
lfs_create(void *v)
{
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	int error;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_create(ap);
	SET_ENDOP_CREATE_AP(ap, "create");
	return (error);
}

int
lfs_mkdir(void *v)
{
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	int error;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_mkdir(ap);
	SET_ENDOP_CREATE_AP(ap, "mkdir");
	return (error);
}

int
lfs_remove(void *v)
{
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *dvp, *vp;
	struct inode *ip;
	int error;

	dvp = ap->a_dvp;
	vp = ap->a_vp;
	ip = VTOI(vp);
	if ((error = SET_DIROP_REMOVE(dvp, vp)) != 0) {
		if (dvp == vp)
			vrele(vp);
		else
			vput(vp);
		vput(dvp);
		return error;
	}
	error = ufs_remove(ap);
	if (ip->i_nlink == 0)
		lfs_orphan(ip->i_lfs, ip->i_number);
	SET_ENDOP_REMOVE(ip->i_lfs, dvp, ap->a_vp, "remove");
	return (error);
}

int
lfs_rmdir(void *v)
{
	struct vop_rmdir_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;
	int error;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if ((error = SET_DIROP_REMOVE(ap->a_dvp, ap->a_vp)) != 0) {
		if (ap->a_dvp == vp)
			vrele(ap->a_dvp);
		else
			vput(ap->a_dvp);
		vput(vp);
		return error;
	}
	error = ufs_rmdir(ap);
	if (ip->i_nlink == 0)
		lfs_orphan(ip->i_lfs, ip->i_number);
	SET_ENDOP_REMOVE(ip->i_lfs, ap->a_dvp, ap->a_vp, "rmdir");
	return (error);
}

int
lfs_link(void *v)
{
	struct vop_link_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	struct vnode **vpp = NULL;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_link(ap);
	SET_ENDOP_CREATE(VTOI(ap->a_dvp)->i_lfs, ap->a_dvp, vpp, "link");
	return (error);
}

int
lfs_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	struct vnode *tvp, *fvp, *tdvp, *fdvp;
	struct componentname *tcnp, *fcnp;
	int error;
	struct lfs *fs;

	fs = VTOI(ap->a_fdvp)->i_lfs;
	tvp = ap->a_tvp;
	tdvp = ap->a_tdvp;
	tcnp = ap->a_tcnp;
	fvp = ap->a_fvp;
	fdvp = ap->a_fdvp;
	fcnp = ap->a_fcnp;

	/*
	 * Check for cross-device rename.
	 * If it is, we don't want to set dirops, just error out.
	 * (In particular note that MARK_VNODE(tdvp) will do the Wrong
	 * Thing on a cross-device rename.)
	 *
	 * Copied from ufs_rename.
	 */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto errout;
	}

	/*
	 * Check to make sure we're not renaming a vnode onto itself
	 * (deleting a hard link by renaming one name onto another);
	 * if we are we can't recursively call VOP_REMOVE since that
	 * would leave us with an unaccounted-for number of live dirops.
	 *
	 * Inline the relevant section of ufs_rename here, *before*
	 * calling SET_DIROP_REMOVE.
	 */
	if (tvp && ((VTOI(tvp)->i_flags & (IMMUTABLE | APPEND)) ||
	    (VTOI(tdvp)->i_flags & APPEND))) {
		error = EPERM;
		goto errout;
	}
	if (fvp == tvp) {
		if (fvp->v_type == VDIR) {
			error = EINVAL;
			goto errout;
		}

		/* Release destination completely. */
		VOP_ABORTOP(tdvp, tcnp);
		vput(tdvp);
		vput(tvp);

		/* Delete source. */
		vrele(fvp);
		fcnp->cn_flags &= ~(MODMASK | SAVESTART);
		fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
		fcnp->cn_nameiop = DELETE;
		vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY);
		if ((error = relookup(fdvp, &fvp, fcnp))) {
			vput(fdvp);
			return (error);
		}
		return (VOP_REMOVE(fdvp, fvp, fcnp));
	}

	if ((error = SET_DIROP_REMOVE(tdvp, tvp)) != 0)
		goto errout;
	MARK_VNODE(fdvp);
	MARK_VNODE(fvp);

	error = ufs_rename(ap);
	UNMARK_VNODE(fdvp);
	UNMARK_VNODE(fvp);
	SET_ENDOP_REMOVE(fs, tdvp, tvp, "rename");
	return (error);

errout:
	VOP_ABORTOP(tdvp, ap->a_tcnp); /* XXX, why not in NFS? */
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	VOP_ABORTOP(fdvp, ap->a_fcnp); /* XXX, why not in NFS? */
	vrele(fdvp);
	vrele(fvp);
	return (error);
}

/* XXX hack to avoid calling ITIMES in getattr */
int
lfs_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct vattr *vap = ap->a_vap;
	struct lfs *fs = ip->i_lfs;
	/*
	 * Copy from inode table
	 */
	vap->va_fsid = ip->i_dev;
	vap->va_fileid = ip->i_number;
	vap->va_mode = ip->i_mode & ~IFMT;
	vap->va_nlink = ip->i_nlink;
	vap->va_uid = ip->i_uid;
	vap->va_gid = ip->i_gid;
	vap->va_rdev = (dev_t)ip->i_ffs1_rdev;
	vap->va_size = vp->v_size;
	vap->va_atime.tv_sec = ip->i_ffs1_atime;
	vap->va_atime.tv_nsec = ip->i_ffs1_atimensec;
	vap->va_mtime.tv_sec = ip->i_ffs1_mtime;
	vap->va_mtime.tv_nsec = ip->i_ffs1_mtimensec;
	vap->va_ctime.tv_sec = ip->i_ffs1_ctime;
	vap->va_ctime.tv_nsec = ip->i_ffs1_ctimensec;
	vap->va_flags = ip->i_flags;
	vap->va_gen = ip->i_gen;
	/* this doesn't belong here */
	if (vp->v_type == VBLK)
		vap->va_blocksize = BLKDEV_IOSIZE;
	else if (vp->v_type == VCHR)
		vap->va_blocksize = MAXBSIZE;
	else
		vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
	vap->va_bytes = fsbtob(fs, (u_quad_t)ip->i_lfs_effnblks);
	vap->va_type = vp->v_type;
	vap->va_filerev = ip->i_modrev;
	return (0);
}

/*
 * Check to make sure the inode blocks won't choke the buffer
 * cache, then call ufs_setattr as usual.
 */
int
lfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	lfs_check(vp, LFS_UNUSED_LBN, 0);
	return ufs_setattr(v);
}

/*
 * Release the block we hold on lfs_newseg wrapping.  Called on file close,
 * or explicitly from LFCNWRAPGO.  Called with lfs_lock held.
 */
static int
lfs_wrapgo(struct lfs *fs, struct inode *ip, int waitfor)
{
	if (fs->lfs_stoplwp != curlwp)
		return EBUSY;

	fs->lfs_stoplwp = NULL;
	cv_signal(&fs->lfs_stopcv);

	KASSERT(fs->lfs_nowrap > 0);
	if (fs->lfs_nowrap <= 0) {
		return 0;
	}

	if (--fs->lfs_nowrap == 0) {
		log(LOG_NOTICE, "%s: re-enabled log wrap\n", fs->lfs_fsmnt);
		wakeup(&fs->lfs_wrappass);
		lfs_wakeup_cleaner(fs);
	}
	if (waitfor) {
		mtsleep(&fs->lfs_nextseg, PCATCH | PUSER, "segment",
		    0, &lfs_lock);
	}

	return 0;
}

/*
 * Close called
 */
/* ARGSUSED */
int
lfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	if ((ip->i_number == ROOTINO || ip->i_number == LFS_IFILE_INUM) &&
	    fs->lfs_stoplwp == curlwp) {
		mutex_enter(&lfs_lock);
		log(LOG_NOTICE, "lfs_close: releasing log wrap control\n");
		lfs_wrapgo(fs, ip, 0);
		mutex_exit(&lfs_lock);
	}

	if (vp == ip->i_lfs->lfs_ivnode &&
	    vp->v_mount->mnt_iflag & IMNT_UNMOUNT)
		return 0;

	if (vp->v_usecount > 1 && vp != ip->i_lfs->lfs_ivnode) {
		LFS_ITIMES(ip, NULL, NULL, NULL);
	}
	return (0);
}

/*
 * Close wrapper for special devices.
 *
 * Update the times on the inode then do device close.
 */
int
lfsspec_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (vp->v_usecount > 1) {
		LFS_ITIMES(ip, NULL, NULL, NULL);
	}
	return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * Close wrapper for fifos.
 *
 * Update the times on the inode then do device close.
 */
int
lfsfifo_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ap->a_vp->v_usecount > 1) {
		LFS_ITIMES(ip, NULL, NULL, NULL);
	}
	return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */

int
lfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;
	int error;

	mutex_enter(&lfs_lock);
	LFS_CLR_UINO(ip, IN_ALLMOD);
	mutex_exit(&lfs_lock);
	if ((error = ufs_reclaim(vp)))
		return (error);

	/*
	 * Take us off the paging and/or dirop queues if we were on them.
	 * We shouldn't be on them.
	 */
	mutex_enter(&lfs_lock);
	if (ip->i_flag & IN_PAGING) {
		log(LOG_WARNING, "%s: reclaimed vnode is IN_PAGING\n",
		    fs->lfs_fsmnt);
		ip->i_flag &= ~IN_PAGING;
		TAILQ_REMOVE(&fs->lfs_pchainhd, ip, i_lfs_pchain);
	}
	if (vp->v_uflag & VU_DIROP) {
		panic("reclaimed vnode is VU_DIROP");
		vp->v_uflag &= ~VU_DIROP;
		TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
	}
	mutex_exit(&lfs_lock);

	pool_put(&lfs_dinode_pool, ip->i_din.ffs1_din);
	lfs_deregister_all(vp);
	pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
	ip->inode_ext.lfs = NULL;
	genfs_node_destroy(vp);
	pool_put(&lfs_inode_pool, vp->v_data);
	vp->v_data = NULL;
	return (0);
}

/*
 * Read a block from a storage device.
 * In order to avoid reading blocks that are in the process of being
 * written by the cleaner---and hence are not mutexed by the normal
 * buffer cache / page cache mechanisms---check for collisions before
 * reading.
 *
 * We inline ufs_strategy to make sure that the VOP_BMAP occurs *before*
 * the active cleaner test.
 *
 * XXX This code assumes that lfs_markv makes synchronous checkpoints.
 */
int
lfs_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp;
	struct lfs *fs;
	struct vnode *vp;
	struct inode *ip;
	daddr_t tbn;
	int i, sn, error, slept;

	bp = ap->a_bp;
	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_lfs;

	/* lfs uses its strategy routine only for read */
	KASSERT(bp->b_flags & B_READ);

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		panic("lfs_strategy: spec");
	KASSERT(bp->b_bcount != 0);
	if (bp->b_blkno == bp->b_lblkno) {
		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
		    NULL);
		if (error) {
			bp->b_error = error;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			return (error);
		}
		if ((long)bp->b_blkno == -1) /* no valid data */
			clrbuf(bp);
	}
	if ((long)bp->b_blkno < 0) { /* block is not on disk */
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	slept = 1;
	mutex_enter(&lfs_lock);
	while (slept && fs->lfs_seglock) {
		mutex_exit(&lfs_lock);
		/*
		 * Look through list of intervals.
		 * There will only be intervals to look through
		 * if the cleaner holds the seglock.
		 * Since the cleaner is synchronous, we can trust
		 * the list of intervals to be current.
		 */
		tbn = dbtofsb(fs, bp->b_blkno);
		sn = dtosn(fs, tbn);
		slept = 0;
		for (i = 0; i < fs->lfs_cleanind; i++) {
			if (sn == dtosn(fs, fs->lfs_cleanint[i]) &&
			    tbn >= fs->lfs_cleanint[i]) {
				DLOG((DLOG_CLEAN,
				    "lfs_strategy: ino %d lbn %" PRId64
				    " ind %d sn %d fsb %" PRIx32
				    " given sn %d fsb %" PRIx64 "\n",
				    ip->i_number, bp->b_lblkno, i,
				    dtosn(fs, fs->lfs_cleanint[i]),
				    fs->lfs_cleanint[i], sn, tbn));
				DLOG((DLOG_CLEAN,
				    "lfs_strategy: sleeping on ino %d lbn %"
				    PRId64 "\n", ip->i_number, bp->b_lblkno));
				mutex_enter(&lfs_lock);
				if (LFS_SEGLOCK_HELD(fs) && fs->lfs_iocount) {
					/* Cleaner can't wait for itself */
					mtsleep(&fs->lfs_iocount,
					    (PRIBIO + 1) | PNORELOCK,
					    "clean2", 0,
					    &lfs_lock);
					slept = 1;
					break;
				} else if (fs->lfs_seglock) {
					mtsleep(&fs->lfs_seglock,
					    (PRIBIO + 1) | PNORELOCK,
					    "clean1", 0,
					    &lfs_lock);
					slept = 1;
					break;
				}
				mutex_exit(&lfs_lock);
			}
		}
		mutex_enter(&lfs_lock);
	}
	mutex_exit(&lfs_lock);

	vp = ip->i_devvp;
	VOP_STRATEGY(vp, bp);
	return (0);
}

void
lfs_flush_dirops(struct lfs *fs)
{
	struct inode *ip, *nip;
	struct vnode *vp;
	extern int lfs_dostats;
	struct segment *sp;
	int waslocked;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(fs->lfs_nadirop == 0);

	if (fs->lfs_ronly)
		return;

	mutex_enter(&lfs_lock);
	if (TAILQ_FIRST(&fs->lfs_dchainhd) == NULL) {
		mutex_exit(&lfs_lock);
		return;
	} else
		mutex_exit(&lfs_lock);

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	/*
	 * Inline lfs_segwrite/lfs_writevnodes, but just for dirops.
	 * Technically this is a checkpoint (the on-disk state is valid)
	 * even though we are leaving out all the file data.
	 */
	lfs_imtime(fs);
	lfs_seglock(fs, SEGM_CKP);
	sp = fs->lfs_sp;

	/*
	 * lfs_writevnodes, optimized to get dirops out of the way.
	 * Only write dirops, and don't flush files' pages, only
	 * blocks from the directories.
	 *
	 * We don't need to vref these files because they are
	 * dirops and so hold an extra reference until the
	 * segunlock clears them of that status.
	 *
	 * We don't need to check for IN_ADIROP because we know that
	 * no dirops are active.
	 */
	mutex_enter(&lfs_lock);
	for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
		nip = TAILQ_NEXT(ip, i_lfs_dchain);
		mutex_exit(&lfs_lock);
		vp = ITOV(ip);

		KASSERT((ip->i_flag & IN_ADIROP) == 0);

		/*
		 * All writes to directories come from dirops; all
		 * writes to files' direct blocks go through the page
		 * cache, which we're not touching.  Reads to files
		 * and/or directories will not be affected by writing
		 * directory blocks inodes and file inodes.  So we don't
		 * really need to lock.  If we don't lock, though,
		 * make sure that we don't clear IN_MODIFIED
		 * unnecessarily.
		 */
		if (vp->v_iflag & VI_XLOCK) {
			mutex_enter(&lfs_lock);
			continue;
		}
		waslocked = VOP_ISLOCKED(vp);
		if (vp->v_type != VREG &&
		    ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp))) {
			lfs_writefile(fs, sp, vp);
			if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
			    !(ip->i_flag & IN_ALLMOD)) {
				mutex_enter(&lfs_lock);
				LFS_SET_UINO(ip, IN_MODIFIED);
				mutex_exit(&lfs_lock);
			}
		}
		KDASSERT(ip->i_number != LFS_IFILE_INUM);
		(void) lfs_writeinode(fs, sp, ip);
		mutex_enter(&lfs_lock);
		if (waslocked == LK_EXCLOTHER)
			LFS_SET_UINO(ip, IN_MODIFIED);
	}
	mutex_exit(&lfs_lock);
	/* We've written all the dirops there are */
	((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
	lfs_finalize_fs_seguse(fs);
	(void) lfs_writeseg(fs, sp);
	lfs_segunlock(fs);
}

/*
 * Flush all vnodes for which the pagedaemon has requested pageouts.
 * Skip over any files that are marked VU_DIROP (since lfs_flush_dirops()
 * has just run, this would be an error).  If we have to skip a vnode
 * for any reason, just skip it; if we have to wait for the cleaner,
 * abort.  The writer daemon will call us again later.
 */
void
lfs_flush_pchain(struct lfs *fs)
{
	struct inode *ip, *nip;
	struct vnode *vp;
	extern int lfs_dostats;
	struct segment *sp;
	int error;

	ASSERT_NO_SEGLOCK(fs);

	if (fs->lfs_ronly)
		return;

	mutex_enter(&lfs_lock);
	if (TAILQ_FIRST(&fs->lfs_pchainhd) == NULL) {
		mutex_exit(&lfs_lock);
		return;
	} else
		mutex_exit(&lfs_lock);

	/* Get dirops out of the way */
	lfs_flush_dirops(fs);

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	/*
	 * Inline lfs_segwrite/lfs_writevnodes, but just for pageouts.
	 */
	lfs_imtime(fs);
	lfs_seglock(fs, 0);
	sp = fs->lfs_sp;

	/*
	 * lfs_writevnodes, optimized to clear pageout requests.
	 * Only write non-dirop files that are in the pageout queue.
	 * We're very conservative about what we write; we want to be
	 * fast and async.
	 */
	mutex_enter(&lfs_lock);
top:
	for (ip = TAILQ_FIRST(&fs->lfs_pchainhd); ip != NULL; ip = nip) {
		nip = TAILQ_NEXT(ip, i_lfs_pchain);
		vp = ITOV(ip);

		if (!(ip->i_flag & IN_PAGING))
			goto top;

		mutex_enter(&vp->v_interlock);
		if ((vp->v_iflag & VI_XLOCK) || (vp->v_uflag & VU_DIROP) != 0) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		if (vp->v_type != VREG) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		if (lfs_vref(vp))
			continue;
		mutex_exit(&lfs_lock);

		if (VOP_ISLOCKED(vp)) {
			lfs_vunref(vp);
			mutex_enter(&lfs_lock);
			continue;
		}

		error = lfs_writefile(fs, sp, vp);
		if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
		    !(ip->i_flag & IN_ALLMOD)) {
			mutex_enter(&lfs_lock);
			LFS_SET_UINO(ip, IN_MODIFIED);
			mutex_exit(&lfs_lock);
		}
		KDASSERT(ip->i_number != LFS_IFILE_INUM);
		(void) lfs_writeinode(fs, sp, ip);

		lfs_vunref(vp);

		if (error == EAGAIN) {
			lfs_writeseg(fs, sp);
			mutex_enter(&lfs_lock);
			break;
		}
		mutex_enter(&lfs_lock);
	}
	mutex_exit(&lfs_lock);
	(void) lfs_writeseg(fs, sp);
	lfs_segunlock(fs);
}

/*
 * Provide a fcntl interface to sys_lfs_{segwait,bmapv,markv}.
 */
int
lfs_fcntl(void *v)
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		void * a_data;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct timeval *tvp;
	BLOCK_INFO *blkiov;
	CLEANERINFO *cip;
	SEGUSE *sup;
	int blkcnt, error, oclean;
	size_t fh_size;
	struct lfs_fcntl_markv blkvp;
	struct lwp *l;
	fsid_t *fsidp;
	struct lfs *fs;
	struct buf *bp;
	fhandle_t *fhp;
	daddr_t off;

	/* Only respect LFS fcntls on fs root or Ifile */
	if (VTOI(ap->a_vp)->i_number != ROOTINO &&
	    VTOI(ap->a_vp)->i_number != LFS_IFILE_INUM) {
		return ufs_fcntl(v);
	}

	/* Avoid locking a draining lock */
	if (ap->a_vp->v_mount->mnt_iflag & IMNT_UNMOUNT) {
		return ESHUTDOWN;
	}

	/* LFS control and monitoring fcntls are available only to root */
	l = curlwp;
	if (((ap->a_command & 0xff00) >> 8) == 'L' &&
	    (error = kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER,
	    NULL)) != 0)
		return (error);

	fs = VTOI(ap->a_vp)->i_lfs;
	fsidp = &ap->a_vp->v_mount->mnt_stat.f_fsidx;

	error = 0;
	switch ((int)ap->a_command) {
	case LFCNSEGWAITALL:
	case LFCNSEGWAITALL_COMPAT:
		fsidp = NULL;
		/* FALLTHROUGH */
	case LFCNSEGWAIT:
	case LFCNSEGWAIT_COMPAT:
		tvp = (struct timeval *)ap->a_data;
		mutex_enter(&lfs_lock);
		++fs->lfs_sleepers;
		mutex_exit(&lfs_lock);

		error = lfs_segwait(fsidp, tvp);

		mutex_enter(&lfs_lock);
		if (--fs->lfs_sleepers == 0)
			wakeup(&fs->lfs_sleepers);
		mutex_exit(&lfs_lock);
		return error;

	case LFCNBMAPV:
	case LFCNMARKV:
		blkvp = *(struct lfs_fcntl_markv *)ap->a_data;

		blkcnt = blkvp.blkcnt;
		if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
			return (EINVAL);
		blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
		if ((error = copyin(blkvp.blkiov, blkiov,
		    blkcnt * sizeof(BLOCK_INFO))) != 0) {
			lfs_free(fs, blkiov, LFS_NB_BLKIOV);
			return error;
		}

		mutex_enter(&lfs_lock);
		++fs->lfs_sleepers;
		mutex_exit(&lfs_lock);
		if (ap->a_command == LFCNBMAPV)
			error = lfs_bmapv(l->l_proc, fsidp, blkiov, blkcnt);
		else /* LFCNMARKV */
			error = lfs_markv(l->l_proc, fsidp, blkiov, blkcnt);
		if (error == 0)
			error = copyout(blkiov, blkvp.blkiov,
			    blkcnt * sizeof(BLOCK_INFO));
		mutex_enter(&lfs_lock);
		if (--fs->lfs_sleepers == 0)
			wakeup(&fs->lfs_sleepers);
		mutex_exit(&lfs_lock);
		lfs_free(fs, blkiov, LFS_NB_BLKIOV);
		return error;

	case LFCNRECLAIM:
		/*
		 * Flush dirops and write Ifile, allowing empty segments
		 * to be immediately reclaimed.
		 */
		lfs_writer_enter(fs, "pndirop");
		off = fs->lfs_offset;
		lfs_seglock(fs, SEGM_FORCE_CKP | SEGM_CKP);
		lfs_flush_dirops(fs);
		LFS_CLEANERINFO(cip, fs, bp);
		oclean = cip->clean;
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
		lfs_segwrite(ap->a_vp->v_mount, SEGM_FORCE_CKP);
		fs->lfs_sp->seg_flags |= SEGM_PROT;
		lfs_segunlock(fs);
		lfs_writer_leave(fs);

#ifdef DEBUG
		LFS_CLEANERINFO(cip, fs, bp);
		DLOG((DLOG_CLEAN, "lfs_fcntl: reclaim wrote %" PRId64
		    " blocks, cleaned %" PRId32 " segments (activesb %d)\n",
		    fs->lfs_offset - off, cip->clean - oclean,
		    fs->lfs_activesb));
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
#endif

		return 0;

#ifdef COMPAT_30
	case LFCNIFILEFH_COMPAT:
		/* Return the filehandle of the Ifile */
		if ((error = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, NULL)) != 0)
			return (error);
		fhp = (struct fhandle *)ap->a_data;
		fhp->fh_fsid = *fsidp;
		fh_size = 16;	/* former VFS_MAXFIDSIZ */
		return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid), &fh_size);
#endif

	case LFCNIFILEFH_COMPAT2:
	case LFCNIFILEFH:
		/* Return the filehandle of the Ifile */
		fhp = (struct fhandle *)ap->a_data;
		fhp->fh_fsid = *fsidp;
		fh_size = sizeof(struct lfs_fhandle) -
		    offsetof(fhandle_t, fh_fid);
		return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid), &fh_size);

	case LFCNREWIND:
		/* Move lfs_offset to the lowest-numbered segment */
		return lfs_rewind(fs, *(int *)ap->a_data);

	case LFCNINVAL:
		/* Mark a segment SEGUSE_INVAL */
		LFS_SEGENTRY(sup, fs, *(int *)ap->a_data, bp);
		if (sup->su_nbytes > 0) {
			brelse(bp, 0);
			lfs_unset_inval_all(fs);
			return EBUSY;
		}
		sup->su_flags |= SEGUSE_INVAL;
		VOP_BWRITE(bp);
		return 0;

	case LFCNRESIZE:
		/* Resize the filesystem */
		return lfs_resize_fs(fs, *(int *)ap->a_data);

	case LFCNWRAPSTOP:
	case LFCNWRAPSTOP_COMPAT:
		/*
		 * Hold lfs_newseg at segment 0; if requested, sleep until
		 * the filesystem wraps around.  This supports external
		 * agents (dump, fsck-based regression tests) that need
		 * to look at a snapshot of the filesystem without
		 * requiring that all fs activity stop.
		 */
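		/*
		 * A user-level agent would typically bracket its work
		 * with something like (a sketch):
		 *
		 *	fcntl(rootfd, LFCNWRAPSTOP, &wait);
		 *	... examine the snapshot ...
		 *	fcntl(rootfd, LFCNWRAPGO, &wait);
		 */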
		if (fs->lfs_stoplwp == curlwp)
			return EALREADY;

		mutex_enter(&lfs_lock);
		while (fs->lfs_stoplwp != NULL)
			cv_wait(&fs->lfs_stopcv, &lfs_lock);
		fs->lfs_stoplwp = curlwp;
		if (fs->lfs_nowrap == 0)
			log(LOG_NOTICE, "%s: disabled log wrap\n", fs->lfs_fsmnt);
		++fs->lfs_nowrap;
		if (*(int *)ap->a_data == 1 ||
		    ap->a_command == LFCNWRAPSTOP_COMPAT) {
			log(LOG_NOTICE, "LFCNSTOPWRAP waiting for log wrap\n");
			error = mtsleep(&fs->lfs_nowrap, PCATCH | PUSER,
			    "segwrap", 0, &lfs_lock);
			log(LOG_NOTICE, "LFCNSTOPWRAP done waiting\n");
			if (error) {
				lfs_wrapgo(fs, VTOI(ap->a_vp), 0);
			}
		}
		mutex_exit(&lfs_lock);
		return 0;

	case LFCNWRAPGO:
	case LFCNWRAPGO_COMPAT:
		/*
		 * Having done its work, the agent wakes up the writer.
		 * If the argument is 1, it sleeps until a new segment
		 * is selected.
		 */
		mutex_enter(&lfs_lock);
		error = lfs_wrapgo(fs, VTOI(ap->a_vp),
		    (ap->a_command == LFCNWRAPGO_COMPAT ? 1 :
		    *((int *)ap->a_data)));
		mutex_exit(&lfs_lock);
		return error;

	case LFCNWRAPPASS:
		if ((VTOI(ap->a_vp)->i_lfs_iflags & LFSI_WRAPWAIT))
			return EALREADY;
		mutex_enter(&lfs_lock);
		if (fs->lfs_stoplwp != curlwp) {
			mutex_exit(&lfs_lock);
			return EALREADY;
		}
		if (fs->lfs_nowrap == 0) {
			mutex_exit(&lfs_lock);
			return EBUSY;
		}
		fs->lfs_wrappass = 1;
		wakeup(&fs->lfs_wrappass);
		/* Wait for the log to wrap, if asked */
		if (*(int *)ap->a_data) {
			mutex_enter(&ap->a_vp->v_interlock);
			lfs_vref(ap->a_vp);
			VTOI(ap->a_vp)->i_lfs_iflags |= LFSI_WRAPWAIT;
			log(LOG_NOTICE, "LFCNPASS waiting for log wrap\n");
			error = mtsleep(&fs->lfs_nowrap, PCATCH | PUSER,
			    "segwrap", 0, &lfs_lock);
			log(LOG_NOTICE, "LFCNPASS done waiting\n");
			VTOI(ap->a_vp)->i_lfs_iflags &= ~LFSI_WRAPWAIT;
			lfs_vunref(ap->a_vp);
		}
		mutex_exit(&lfs_lock);
		return error;

	case LFCNWRAPSTATUS:
		mutex_enter(&lfs_lock);
		*(int *)ap->a_data = fs->lfs_wrapstatus;
		mutex_exit(&lfs_lock);
		return 0;

	default:
		return ufs_fcntl(v);
	}
	return 0;
}

int
lfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM &&
	    (ap->a_access_type & VM_PROT_WRITE) != 0) {
		return EPERM;
	}
	if ((ap->a_access_type & VM_PROT_WRITE) != 0) {
		mutex_enter(&lfs_lock);
		LFS_SET_UINO(VTOI(ap->a_vp), IN_MODIFIED);
		mutex_exit(&lfs_lock);
	}

	/*
	 * We're relying on the fact that genfs_getpages() always reads in
	 * entire filesystem blocks.
	 */
	return genfs_getpages(v);
}

/*
 * Wait for a page to become unbusy, possibly printing diagnostic messages
 * as well.
 *
 * Called with vp->v_interlock held; return with it held.
 */
static void
wait_for_page(struct vnode *vp, struct vm_page *pg, const char *label)
{
	if ((pg->flags & PG_BUSY) == 0)
		return;		/* Nothing to wait for! */

#if defined(DEBUG) && defined(UVM_PAGE_TRKOWN)
	static struct vm_page *lastpg;

	if (label != NULL && pg != lastpg) {
		if (pg->owner_tag) {
			printf("lfs_putpages[%d.%d]: %s: page %p owner %d.%d [%s]\n",
			    curproc->p_pid, curlwp->l_lid, label,
			    pg, pg->owner, pg->lowner, pg->owner_tag);
		} else {
			printf("lfs_putpages[%d.%d]: %s: page %p unowned?!\n",
			    curproc->p_pid, curlwp->l_lid, label, pg);
		}
	}
	lastpg = pg;
#endif

	pg->flags |= PG_WANTED;
	UVM_UNLOCK_AND_WAIT(pg, &vp->v_interlock, 0, "lfsput", 0);
	mutex_enter(&vp->v_interlock);
}

/*
 * This routine is called by lfs_putpages() when it can't complete the
 * write because a page is busy.  This means that either (1) someone,
 * possibly the pagedaemon, is looking at this page, and will give it up
 * presently; or (2) we ourselves are holding the page busy in the
 * process of being written (either gathered or actually on its way to
 * disk).  We don't need to give up the segment lock, but we might need
 * to call lfs_writeseg() to expedite the page's journey to disk.
 *
 * Called with vp->v_interlock held; return with it held.
 */
/* #define BUSYWAIT */
static void
write_and_wait(struct lfs *fs, struct vnode *vp, struct vm_page *pg,
    int seglocked, const char *label)
{
#ifndef BUSYWAIT
	struct inode *ip = VTOI(vp);
	struct segment *sp = fs->lfs_sp;
	int count = 0;

	if (pg == NULL)
		return;

	while (pg->flags & PG_BUSY) {
		mutex_exit(&vp->v_interlock);
		if (sp->cbpp - sp->bpp > 1) {
			/* Write gathered pages */
			lfs_updatemeta(sp);
			lfs_release_finfo(fs);
			(void) lfs_writeseg(fs, sp);

			/*
			 * Reinitialize FIP
			 */
			KASSERT(sp->vp == vp);
			lfs_acquire_finfo(fs, ip->i_number,
			    ip->i_gen);
		}
		++count;
		mutex_enter(&vp->v_interlock);
		wait_for_page(vp, pg, label);
	}
	if (label != NULL && count > 1)
		printf("lfs_putpages[%d]: %s: %sn = %d\n", curproc->p_pid,
		    label, (count > 0 ? "looping, " : ""), count);
#else
	preempt();
#endif
}

/*
 * Make sure that for all pages in every block in the given range,
 * either all are dirty or all are clean.  If any of the pages
 * we've seen so far are dirty, put the vnode on the paging chain,
 * and mark it IN_PAGING.
 *
 * If checkfirst != 0, don't check all the pages but return at the
 * first dirty page.
 */
static int
check_dirty(struct lfs *fs, struct vnode *vp,
    off_t startoffset, off_t endoffset, off_t blkeof,
    int flags, int checkfirst, struct vm_page **pgp)
{
	int by_list;
	struct vm_page *curpg = NULL; /* XXX: gcc */
	struct vm_page *pgs[MAXBSIZE / PAGE_SIZE], *pg;
	off_t soff = 0; /* XXX: gcc */
	voff_t off;
	int i;
	int nonexistent;
	int any_dirty;	/* number of dirty pages */
	int dirty;	/* number of dirty pages in a block */
	int tdirty;
	int pages_per_block = fs->lfs_bsize >> PAGE_SHIFT;
	int pagedaemon = (curlwp == uvm.pagedaemon_lwp);

	ASSERT_MAYBE_SEGLOCK(fs);
top:
	by_list = (vp->v_uobj.uo_npages <=
	    ((endoffset - startoffset) >> PAGE_SHIFT) *
	    UVM_PAGE_TREE_PENALTY);
	any_dirty = 0;

	if (by_list) {
		curpg = TAILQ_FIRST(&vp->v_uobj.memq);
	} else {
		soff = startoffset;
	}
	while (by_list || soff < MIN(blkeof, endoffset)) {
		if (by_list) {
			/*
			 * Find the first page in a block.  Skip
			 * blocks outside our area of interest or beyond
			 * the end of file.
			 */
			if (pages_per_block > 1) {
				while (curpg &&
				    ((curpg->offset & fs->lfs_bmask) ||
				    curpg->offset >= vp->v_size ||
				    curpg->offset >= endoffset))
					curpg = TAILQ_NEXT(curpg, listq.queue);
			}
			if (curpg == NULL)
				break;
			soff = curpg->offset;
		}

		/*
		 * Mark all pages in extended range busy; find out if any
		 * of them are dirty.
		 */
		nonexistent = dirty = 0;
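		/*
		 * Note: the "i == 0 ||" below forces at least one pass
		 * through the loop even when pages_per_block is zero,
		 * i.e. when the page size is larger than the block size.
		 */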
1850 for (i = 0; i == 0 || i < pages_per_block; i++) {
1851 if (by_list && pages_per_block <= 1) {
1852 pgs[i] = pg = curpg;
1853 } else {
1854 off = soff + (i << PAGE_SHIFT);
1855 pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
1856 if (pg == NULL) {
1857 ++nonexistent;
1858 continue;
1859 }
1860 }
1861 KASSERT(pg != NULL);
1862
1863 /*
1864 * If we're holding the segment lock, we can deadlock
1865 * against a process that has our page and is waiting
1866 * for the cleaner, while the cleaner waits for the
1867 * segment lock. Just bail in that case.
1868 */
1869 if ((pg->flags & PG_BUSY) &&
1870 (pagedaemon || LFS_SEGLOCK_HELD(fs))) {
1871 if (i > 0)
1872 uvm_page_unbusy(pgs, i);
1873 DLOG((DLOG_PAGE, "lfs_putpages: avoiding 3-way or pagedaemon deadlock\n"));
1874 if (pgp)
1875 *pgp = pg;
1876 return -1;
1877 }
1878
1879 while (pg->flags & PG_BUSY) {
1880 wait_for_page(vp, pg, NULL);
1881 if (i > 0)
1882 uvm_page_unbusy(pgs, i);
1883 goto top;
1884 }
1885 pg->flags |= PG_BUSY;
1886 UVM_PAGE_OWN(pg, "lfs_putpages");
1887
1888 pmap_page_protect(pg, VM_PROT_NONE);
1889 tdirty = (pmap_clear_modify(pg) ||
1890 (pg->flags & PG_CLEAN) == 0);
1891 dirty += tdirty;
1892 }
1893 if (pages_per_block > 0 && nonexistent >= pages_per_block) {
1894 if (by_list) {
1895 curpg = TAILQ_NEXT(curpg, listq.queue);
1896 } else {
1897 soff += fs->lfs_bsize;
1898 }
1899 continue;
1900 }
1901
1902 any_dirty += dirty;
1903 KASSERT(nonexistent == 0);
1904
1905 /*
1906 * If any are dirty make all dirty; unbusy them,
1907 * but if we were asked to clean, wire them so that
1908 * the pagedaemon doesn't bother us about them while
1909 * they're on their way to disk.
1910 */
1911 for (i = 0; i == 0 || i < pages_per_block; i++) {
1912 pg = pgs[i];
1913 KASSERT(!((pg->flags & PG_CLEAN) && (pg->flags & PG_DELWRI)));
1914 if (dirty) {
1915 pg->flags &= ~PG_CLEAN;
1916 if (flags & PGO_FREE) {
1917 /*
1918 * Wire the page so that
1919 * pdaemon doesn't see it again.
1920 */
1921 mutex_enter(&uvm_pageqlock);
1922 uvm_pagewire(pg);
1923 mutex_exit(&uvm_pageqlock);
1924
1925 /* Suspended write flag */
1926 pg->flags |= PG_DELWRI;
1927 }
1928 }
1929 if (pg->flags & PG_WANTED)
1930 wakeup(pg);
1931 pg->flags &= ~(PG_WANTED|PG_BUSY);
1932 UVM_PAGE_OWN(pg, NULL);
1933 }
1934
1935 if (checkfirst && any_dirty)
1936 break;
1937
1938 if (by_list) {
1939 curpg = TAILQ_NEXT(curpg, listq.queue);
1940 } else {
1941 soff += MAX(PAGE_SIZE, fs->lfs_bsize);
1942 }
1943 }
1944
1945 return any_dirty;
1946 }
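
/*
 * Illustrative sketch (not compiled in): how a caller is expected to
 * drive check_dirty()'s three-way return convention -- negative when a
 * busy page makes waiting dangerous, zero when the range is clean, and
 * a positive count when dirty pages were found.  It mirrors the retry
 * loop in lfs_putpages() below; "example_classify" is a hypothetical
 * name, not part of this file.
 */
#if 0
static int
example_classify(struct lfs *fs, struct vnode *vp, off_t lo, off_t hi,
    off_t blkeof, int flags)
{
	int r;

	/* check_dirty() is called and returns with v_interlock held. */
	mutex_enter(&vp->v_interlock);
	r = check_dirty(fs, vp, lo, hi, blkeof, flags, 1, NULL);
	mutex_exit(&vp->v_interlock);

	if (r < 0)
		return EDEADLK;		/* busy page; back off and retry */
	if (r == 0)
		return 0;		/* clean; genfs_putpages suffices */
	return EAGAIN;			/* dirty; take seglock and write */
}
#endif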
1947
1948 /*
1949 * lfs_putpages functions like genfs_putpages except that
1950 *
1951 * (1) It needs to bounds-check the incoming requests to ensure that
1952 * they are block-aligned; if they are not, expand the range and
1953 * do the right thing when, e.g., the requested range is clean
1954 * but the expanded range is dirty.
1955 *
1956 * (2) It needs to explicitly send blocks to be written when it is done.
1957 * If VOP_PUTPAGES is called without the seglock held, we simply take
1958 * the seglock and let lfs_segunlock wait for us.
1959 * XXX There might be a bad situation if we have to flush a vnode while
1960 * XXX lfs_markv is in operation. As of this writing we panic in this
1961 * XXX case.
1962 *
1963 * Assumptions:
1964 *
1965 * (1) The caller does not hold any pages in this vnode busy. If it does,
1966 * there is a danger that when we expand the page range and busy the
1967 * pages we will deadlock.
1968 *
1969 * (2) We are called with vp->v_interlock held; we must return with it
1970 * released.
1971 *
1972 * (3) We don't absolutely have to free pages right away, provided that
1973 * the request does not have PGO_SYNCIO. When the pagedaemon gives
1974 * us a request with PGO_FREE, we take the pages out of the paging
1975 * queue and wake up the writer, which will handle freeing them for us.
1976 *
1977 * We ensure that for any filesystem block, the pages for that
1978 * block are either all resident or all absent, even if they lie
1979 * beyond EOF; that means that we will keep getting requests to
1980 * free "unused" pages above EOF, and should ignore them.
1981 *
1982 * (4) If we are called with PGO_LOCKED, the finfo array we are to write
1983 * into has been set up for us by lfs_writefile. If not, we will
1984 * have to handle allocating and/or freeing an finfo entry.
1985 *
1986 * XXX note that we're (ab)using PGO_LOCKED as "seglock held".
1987 */
1988
1989 /* How many times to loop before we should start to worry */
1990 #define TOOMANY 4
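
/*
 * Illustrative sketch (not compiled in) of the bounds expansion that
 * point (1) above describes: the low offset is rounded down and the
 * high offset up to filesystem block boundaries, since partial blocks
 * cannot be gathered.  With lfs_bsize = 8192, a request for
 * [4096, 12288) becomes [0, 16384).  "example_expand" is hypothetical.
 */
#if 0
static void
example_expand(struct lfs *fs, off_t *lop, off_t *hip)
{
	*lop &= ~(fs->lfs_bmask);		/* round down to a block */
	*hip = blkroundup(fs, round_page(*hip)); /* and up to a block */
}
#endif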
1991
1992 int
1993 lfs_putpages(void *v)
1994 {
1995 int error;
1996 struct vop_putpages_args /* {
1997 struct vnode *a_vp;
1998 voff_t a_offlo;
1999 voff_t a_offhi;
2000 int a_flags;
2001 } */ *ap = v;
2002 struct vnode *vp;
2003 struct inode *ip;
2004 struct lfs *fs;
2005 struct segment *sp;
2006 off_t origoffset, startoffset, endoffset, origendoffset, blkeof;
2007 off_t off, max_endoffset;
2008 bool seglocked, sync, pagedaemon;
2009 struct vm_page *pg, *busypg;
2010 UVMHIST_FUNC("lfs_putpages"); UVMHIST_CALLED(ubchist);
2011 #ifdef DEBUG
2012 int debug_n_again, debug_n_dirtyclean;
2013 #endif
2014
2015 vp = ap->a_vp;
2016 ip = VTOI(vp);
2017 fs = ip->i_lfs;
2018 sync = (ap->a_flags & PGO_SYNCIO) != 0;
2019 pagedaemon = (curlwp == uvm.pagedaemon_lwp);
2020
2021 /* Putpages does nothing for metadata. */
2022 if (vp == fs->lfs_ivnode || vp->v_type != VREG) {
2023 mutex_exit(&vp->v_interlock);
2024 return 0;
2025 }
2026
2027 /*
2028 * If there are no pages, don't do anything.
2029 */
2030 if (vp->v_uobj.uo_npages == 0) {
2031 if (TAILQ_EMPTY(&vp->v_uobj.memq) &&
2032 (vp->v_iflag & VI_ONWORKLST) &&
2033 LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2034 vp->v_iflag &= ~VI_WRMAPDIRTY;
2035 vn_syncer_remove_from_worklist(vp);
2036 }
2037 mutex_exit(&vp->v_interlock);
2038
2039 /* Remove us from paging queue, if we were on it */
2040 mutex_enter(&lfs_lock);
2041 if (ip->i_flags & IN_PAGING) {
2042 ip->i_flags &= ~IN_PAGING;
2043 TAILQ_REMOVE(&fs->lfs_pchainhd, ip, i_lfs_pchain);
2044 }
2045 mutex_exit(&lfs_lock);
2046 return 0;
2047 }
2048
2049 blkeof = blkroundup(fs, ip->i_size);
2050
2051 /*
2052 * Ignore requests to free pages past EOF but in the same block
2053 * as EOF, unless the request is synchronous. (If the request is
2054 * sync, it comes from lfs_truncate.)
2055 * XXXUBC Make these pages look "active" so the pagedaemon won't
2056 * XXXUBC bother us with them again.
2057 */
2058 if (!sync && ap->a_offlo >= ip->i_size && ap->a_offlo < blkeof) {
2059 origoffset = ap->a_offlo;
2060 for (off = origoffset; off < blkeof; off += fs->lfs_bsize) {
2061 pg = uvm_pagelookup(&vp->v_uobj, off);
2062 KASSERT(pg != NULL);
2063 while (pg->flags & PG_BUSY) {
2064 pg->flags |= PG_WANTED;
2065 UVM_UNLOCK_AND_WAIT(pg, &vp->v_interlock, 0,
2066 "lfsput2", 0);
2067 mutex_enter(&vp->v_interlock);
2068 }
2069 mutex_enter(&uvm_pageqlock);
2070 uvm_pageactivate(pg);
2071 mutex_exit(&uvm_pageqlock);
2072 }
2073 ap->a_offlo = blkeof;
2074 if (ap->a_offhi > 0 && ap->a_offhi <= ap->a_offlo) {
2075 mutex_exit(&vp->v_interlock);
2076 return 0;
2077 }
2078 }
2079
2080 /*
2081 * Extend page range to start and end at block boundaries.
2082 * (For the purposes of VOP_PUTPAGES, fragments don't exist.)
2083 */
2084 origoffset = ap->a_offlo;
2085 origendoffset = ap->a_offhi;
2086 startoffset = origoffset & ~(fs->lfs_bmask);
2087 max_endoffset = (trunc_page(LLONG_MAX) >> fs->lfs_bshift)
2088 << fs->lfs_bshift;
2089
2090 if (origendoffset == 0 || ap->a_flags & PGO_ALLPAGES) {
2091 endoffset = max_endoffset;
2092 origendoffset = endoffset;
2093 } else {
2094 origendoffset = round_page(ap->a_offhi);
2095 endoffset = round_page(blkroundup(fs, origendoffset));
2096 }
2097
2098 KASSERT(startoffset > 0 || endoffset >= startoffset);
2099 if (startoffset == endoffset) {
2100 /* Nothing to do, why were we called? */
2101 mutex_exit(&vp->v_interlock);
2102 DLOG((DLOG_PAGE, "lfs_putpages: startoffset = endoffset = %"
2103 PRId64 "\n", startoffset));
2104 return 0;
2105 }
2106
2107 ap->a_offlo = startoffset;
2108 ap->a_offhi = endoffset;
2109
2110 /*
2111 * If not cleaning, just send the pages through genfs_putpages
2112 * to be returned to the pool.
2113 */
2114 if (!(ap->a_flags & PGO_CLEANIT))
2115 return genfs_putpages(v);
2116
2117 /* Set PGO_BUSYFAIL to avoid deadlocks */
2118 ap->a_flags |= PGO_BUSYFAIL;
2119
2120 /*
2121 * Likewise, if we are asked to clean but the pages are not
2122 * dirty, we can just free them using genfs_putpages.
2123 */
2124 #ifdef DEBUG
2125 debug_n_dirtyclean = 0;
2126 #endif
2127 do {
2128 int r;
2129
2130 /* Count the number of dirty pages */
2131 r = check_dirty(fs, vp, startoffset, endoffset, blkeof,
2132 ap->a_flags, 1, NULL);
2133 if (r < 0) {
2134 /* Pages are busy with another process */
2135 mutex_exit(&vp->v_interlock);
2136 return EDEADLK;
2137 }
2138 if (r > 0) /* Some pages are dirty */
2139 break;
2140
2141 /*
2142 * Sometimes pages are dirtied between the time that
2143 * we check and the time we try to clean them.
2144 * Instruct lfs_gop_write to return EDEADLK in this case
2145 * so we can write them properly.
2146 */
2147 ip->i_lfs_iflags |= LFSI_NO_GOP_WRITE;
2148 r = genfs_do_putpages(vp, startoffset, endoffset,
2149 ap->a_flags, &busypg);
2150 ip->i_lfs_iflags &= ~LFSI_NO_GOP_WRITE;
2151 if (r != EDEADLK)
2152 return r;
2153
2154 /* One of the pages was busy. Start over. */
2155 mutex_enter(&vp->v_interlock);
2156 wait_for_page(vp, busypg, "dirtyclean");
2157 #ifdef DEBUG
2158 ++debug_n_dirtyclean;
2159 #endif
2160 	} while (1);
2161
2162 #ifdef DEBUG
2163 if (debug_n_dirtyclean > TOOMANY)
2164 printf("lfs_putpages: dirtyclean: looping, n = %d\n",
2165 debug_n_dirtyclean);
2166 #endif
2167
2168 /*
2169 * Dirty and asked to clean.
2170 *
2171 	 * The pagedaemon can't actually write LFS pages; wake up
2172 	 * the writer to take care of that.  The writer will
2173 	 * notice the pager inode queue and act on it.
2174 */
2175 if (pagedaemon) {
2176 mutex_enter(&lfs_lock);
2177 if (!(ip->i_flags & IN_PAGING)) {
2178 ip->i_flags |= IN_PAGING;
2179 TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip, i_lfs_pchain);
2180 }
2181 wakeup(&lfs_writer_daemon);
2182 mutex_exit(&lfs_lock);
2183 mutex_exit(&vp->v_interlock);
2184 preempt();
2185 return EWOULDBLOCK;
2186 }
2187
2188 /*
2189 * If this is a file created in a recent dirop, we can't flush its
2190 * inode until the dirop is complete. Drain dirops, then flush the
2191 * filesystem (taking care of any other pending dirops while we're
2192 * at it).
2193 */
2194 if ((ap->a_flags & (PGO_CLEANIT|PGO_LOCKED)) == PGO_CLEANIT &&
2195 (vp->v_uflag & VU_DIROP)) {
2196 int locked;
2197
2198 DLOG((DLOG_PAGE, "lfs_putpages: flushing VU_DIROP\n"));
2199 locked = (VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
2200 mutex_exit(&vp->v_interlock);
2201 lfs_writer_enter(fs, "ppdirop");
2202 if (locked)
2203 VOP_UNLOCK(vp, 0); /* XXX why? */
2204
2205 mutex_enter(&lfs_lock);
2206 lfs_flush_fs(fs, sync ? SEGM_SYNC : 0);
2207 mutex_exit(&lfs_lock);
2208
2209 mutex_enter(&vp->v_interlock);
2210 if (locked) {
2211 VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK);
2212 mutex_enter(&vp->v_interlock);
2213 }
2214 lfs_writer_leave(fs);
2215
2216 /* XXX the flush should have taken care of this one too! */
2217 }
2218
2219 /*
2220 * This is it. We are going to write some pages. From here on
2221 * down it's all just mechanics.
2222 *
2223 * Don't let genfs_putpages wait; lfs_segunlock will wait for us.
2224 */
2225 ap->a_flags &= ~PGO_SYNCIO;
2226
2227 /*
2228 * If we've already got the seglock, flush the node and return.
2229 * The FIP has already been set up for us by lfs_writefile,
2230 * and FIP cleanup and lfs_updatemeta will also be done there,
2231 * unless genfs_putpages returns EDEADLK; then we must flush
2232 * what we have, and correct FIP and segment header accounting.
2233 */
2234 get_seglock:
2235 /*
2236 * If we are not called with the segment locked, lock it.
2237 * Account for a new FIP in the segment header, and set sp->vp.
2238 * (This should duplicate the setup at the top of lfs_writefile().)
2239 */
2240 seglocked = (ap->a_flags & PGO_LOCKED) != 0;
2241 if (!seglocked) {
2242 mutex_exit(&vp->v_interlock);
2243 error = lfs_seglock(fs, SEGM_PROT | (sync ? SEGM_SYNC : 0));
2244 if (error != 0)
2245 return error;
2246 mutex_enter(&vp->v_interlock);
2247 lfs_acquire_finfo(fs, ip->i_number, ip->i_gen);
2248 }
2249 sp = fs->lfs_sp;
2250 KASSERT(sp->vp == NULL);
2251 sp->vp = vp;
2252
2253 /*
2254 * Ensure that the partial segment is marked SS_DIROP if this
2255 * vnode is a DIROP.
2256 */
2257 if (!seglocked && vp->v_uflag & VU_DIROP)
2258 ((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);
2259
2260 /*
2261 * Loop over genfs_putpages until all pages are gathered.
2262 * genfs_putpages() drops the interlock, so reacquire it if necessary.
2263 * Whenever we lose the interlock we have to rerun check_dirty, as
2264 * well, since more pages might have been dirtied in our absence.
2265 */
2266 #ifdef DEBUG
2267 debug_n_again = 0;
2268 #endif
2269 do {
2270 busypg = NULL;
2271 if (check_dirty(fs, vp, startoffset, endoffset, blkeof,
2272 ap->a_flags, 0, &busypg) < 0) {
2276 write_and_wait(fs, vp, busypg, seglocked, NULL);
2277 			if (!seglocked) {
				/*
				 * lfs_segunlock can sleep; don't hold
				 * the vnode interlock across it.
				 */
				mutex_exit(&vp->v_interlock);
2278 				lfs_release_finfo(fs);
2279 				lfs_segunlock(fs);
				mutex_enter(&vp->v_interlock);
2280 			}
2281 sp->vp = NULL;
2282 goto get_seglock;
2283 }
2284
2285 busypg = NULL;
2286 error = genfs_do_putpages(vp, startoffset, endoffset,
2287 ap->a_flags, &busypg);
2288
2289 if (error == EDEADLK || error == EAGAIN) {
2290 DLOG((DLOG_PAGE, "lfs_putpages: genfs_putpages returned"
2291 " %d ino %d off %x (seg %d)\n", error,
2292 ip->i_number, fs->lfs_offset,
2293 dtosn(fs, fs->lfs_offset)));
2294
2295 mutex_enter(&vp->v_interlock);
2296 write_and_wait(fs, vp, busypg, seglocked, "again");
2297 }
2298 #ifdef DEBUG
2299 ++debug_n_again;
2300 #endif
2301 } while (error == EDEADLK);
2302 #ifdef DEBUG
2303 if (debug_n_again > TOOMANY)
2304 printf("lfs_putpages: again: looping, n = %d\n", debug_n_again);
2305 #endif
2306
2307 KASSERT(sp != NULL && sp->vp == vp);
2308 if (!seglocked) {
2309 sp->vp = NULL;
2310
2311 /* Write indirect blocks as well */
2312 lfs_gather(fs, fs->lfs_sp, vp, lfs_match_indir);
2313 lfs_gather(fs, fs->lfs_sp, vp, lfs_match_dindir);
2314 lfs_gather(fs, fs->lfs_sp, vp, lfs_match_tindir);
2315
2316 KASSERT(sp->vp == NULL);
2317 sp->vp = vp;
2318 }
2319
2320 /*
2321 * Blocks are now gathered into a segment waiting to be written.
2322 * All that's left to do is update metadata, and write them.
2323 */
2324 lfs_updatemeta(sp);
2325 KASSERT(sp->vp == vp);
2326 sp->vp = NULL;
2327
2328 /*
2329 * If we were called from lfs_writefile, we don't need to clean up
2330 * the FIP or unlock the segment lock. We're done.
2331 */
2332 if (seglocked)
2333 return error;
2334
2335 /* Clean up FIP and send it to disk. */
2336 lfs_release_finfo(fs);
2337 lfs_writeseg(fs, fs->lfs_sp);
2338
2339 /*
2340 * Remove us from paging queue if we wrote all our pages.
2341 */
2342 if (origendoffset == 0 || ap->a_flags & PGO_ALLPAGES) {
2343 mutex_enter(&lfs_lock);
2344 if (ip->i_flags & IN_PAGING) {
2345 ip->i_flags &= ~IN_PAGING;
2346 TAILQ_REMOVE(&fs->lfs_pchainhd, ip, i_lfs_pchain);
2347 }
2348 mutex_exit(&lfs_lock);
2349 }
2350
2351 /*
2352 * XXX - with the malloc/copy writeseg, the pages are freed by now
2353 * even if we don't wait (e.g. if we hold a nested lock). This
2354 * will not be true if we stop using malloc/copy.
2355 */
2356 KASSERT(fs->lfs_sp->seg_flags & SEGM_PROT);
2357 lfs_segunlock(fs);
2358
2359 /*
2360 * Wait for v_numoutput to drop to zero. The seglock should
2361 * take care of this, but there is a slight possibility that
2362 * aiodoned might not have got around to our buffers yet.
2363 */
2364 if (sync) {
2365 mutex_enter(&vp->v_interlock);
2366 while (vp->v_numoutput > 0) {
2367 DLOG((DLOG_PAGE, "lfs_putpages: ino %d sleeping on"
2368 " num %d\n", ip->i_number, vp->v_numoutput));
2369 cv_wait(&vp->v_cv, &vp->v_interlock);
2370 }
2371 mutex_exit(&vp->v_interlock);
2372 }
2373 return error;
2374 }
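
/*
 * Illustrative sketch (not compiled in) of the seglock/FINFO bracketing
 * that lfs_putpages() performs when it is not called from within
 * lfs_writefile(): take the segment lock, open a FINFO for the inode,
 * write, then close the FINFO and ship the partial segment.
 * "example_write_one" is a hypothetical name, not part of this file.
 */
#if 0
static int
example_write_one(struct lfs *fs, struct vnode *vp, int sync)
{
	struct inode *ip = VTOI(vp);
	int error;

	error = lfs_seglock(fs, SEGM_PROT | (sync ? SEGM_SYNC : 0));
	if (error != 0)
		return error;
	lfs_acquire_finfo(fs, ip->i_number, ip->i_gen);

	/* ... gather and write this vnode's dirty blocks here ... */

	lfs_release_finfo(fs);
	lfs_writeseg(fs, fs->lfs_sp);
	lfs_segunlock(fs);
	return 0;
}
#endif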
2375
2376 /*
2377 * Return the last logical file offset that should be written for this file
2378 * if we're doing a write that ends at "size". If writing, we need to know
2379 * about sizes on disk, i.e. fragments if there are any; if reading, we need
2380 * to know about entire blocks.
2381 */
2382 void
2383 lfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
2384 {
2385 struct inode *ip = VTOI(vp);
2386 struct lfs *fs = ip->i_lfs;
2387 daddr_t olbn, nlbn;
2388
2389 olbn = lblkno(fs, ip->i_size);
2390 nlbn = lblkno(fs, size);
2391 if (!(flags & GOP_SIZE_MEM) && nlbn < NDADDR && olbn <= nlbn) {
2392 *eobp = fragroundup(fs, size);
2393 } else {
2394 *eobp = blkroundup(fs, size);
2395 }
2396 }
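
/*
 * Illustrative sketch (not compiled in): with lfs_bsize = 8192 and
 * lfs_fsize = 1024, a 5000-byte file in the direct-block range ends at
 * fragroundup() = 5120 for writing, while the in-memory (GOP_SIZE_MEM)
 * answer rounds up to a whole block, blkroundup() = 8192.
 * "example_eob" is a hypothetical name.
 */
#if 0
static void
example_eob(struct lfs *fs)
{
	KASSERT(fragroundup(fs, 5000) == 5120);	/* size on disk */
	KASSERT(blkroundup(fs, 5000) == 8192);	/* whole blocks in memory */
}
#endif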
2397
2398 #ifdef DEBUG
2399 void lfs_dump_vop(void *);
2400
2401 void
2402 lfs_dump_vop(void *v)
2403 {
2404 struct vop_putpages_args /* {
2405 struct vnode *a_vp;
2406 voff_t a_offlo;
2407 voff_t a_offhi;
2408 int a_flags;
2409 } */ *ap = v;
2410
2411 #ifdef DDB
2412 vfs_vnode_print(ap->a_vp, 0, printf);
2413 #endif
2414 lfs_dump_dinode(VTOI(ap->a_vp)->i_din.ffs1_din);
2415 }
2416 #endif
2417
2418 int
2419 lfs_mmap(void *v)
2420 {
2421 struct vop_mmap_args /* {
2422 const struct vnodeop_desc *a_desc;
2423 struct vnode *a_vp;
2424 vm_prot_t a_prot;
2425 kauth_cred_t a_cred;
2426 } */ *ap = v;
2427
2428 if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM)
2429 return EOPNOTSUPP;
2430 return ufs_mmap(v);
2431 }
2432