/*	$NetBSD: lfs_vfsops.c,v 1.329 2015/07/28 05:09:35 dholland Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_vfsops.c	8.20 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.329 2015/07/28 05:09:35 dholland Exp $");

#if defined(_KERNEL_OPT)
#include "opt_lfs.h"
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kthread.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/module.h>
#include <sys/syscallvar.h>
#include <sys/syscall.h>
#include <sys/syscallargs.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/lfs/ulfs_quotacommon.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_bswap.h>
#include <ufs/lfs/ulfs_extern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_stat.h>
#include <uvm/uvm_pager.h>
#include <uvm/uvm_pdaemon.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_kernel.h>
#include <ufs/lfs/lfs_extern.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>

MODULE(MODULE_CLASS_VFS, lfs, NULL);

static int lfs_gop_write(struct vnode *, struct vm_page **, int, int);
static int lfs_mountfs(struct vnode *, struct mount *, struct lwp *);

static struct sysctllog *lfs_sysctl_log;

extern const struct vnodeopv_desc lfs_vnodeop_opv_desc;
extern const struct vnodeopv_desc lfs_specop_opv_desc;
extern const struct vnodeopv_desc lfs_fifoop_opv_desc;

pid_t lfs_writer_daemon = 0;
lwpid_t lfs_writer_lid = 0;
int lfs_do_flush = 0;
#ifdef LFS_KERNEL_RFW
int lfs_do_rfw = 0;
#endif

const struct vnodeopv_desc * const lfs_vnodeopv_descs[] = {
	&lfs_vnodeop_opv_desc,
	&lfs_specop_opv_desc,
	&lfs_fifoop_opv_desc,
	NULL,
};

struct vfsops lfs_vfsops = {
	.vfs_name = MOUNT_LFS,
	.vfs_min_mount_data = sizeof (struct ulfs_args),
	.vfs_mount = lfs_mount,
	.vfs_start = ulfs_start,
	.vfs_unmount = lfs_unmount,
	.vfs_root = ulfs_root,
	.vfs_quotactl = ulfs_quotactl,
	.vfs_statvfs = lfs_statvfs,
	.vfs_sync = lfs_sync,
	.vfs_vget = lfs_vget,
	.vfs_loadvnode = lfs_loadvnode,
	.vfs_newvnode = lfs_newvnode,
	.vfs_fhtovp = lfs_fhtovp,
	.vfs_vptofh = lfs_vptofh,
	.vfs_init = lfs_init,
	.vfs_reinit = lfs_reinit,
	.vfs_done = lfs_done,
	.vfs_mountroot = lfs_mountroot,
	.vfs_snapshot = (void *)eopnotsupp,
	.vfs_extattrctl = lfs_extattrctl,
	.vfs_suspendctl = (void *)eopnotsupp,
	.vfs_renamelock_enter = genfs_renamelock_enter,
	.vfs_renamelock_exit = genfs_renamelock_exit,
	.vfs_fsync = (void *)eopnotsupp,
	.vfs_opv_descs = lfs_vnodeopv_descs
};

const struct genfs_ops lfs_genfsops = {
	.gop_size = lfs_gop_size,
	.gop_alloc = ulfs_gop_alloc,
	.gop_write = lfs_gop_write,
	.gop_markupdate = ulfs_gop_markupdate,
};

struct shortlong {
	const char *sname;
	const char *lname;
};

static int
sysctl_lfs_dostats(SYSCTLFN_ARGS)
{
	extern struct lfs_stats lfs_stats;
	extern int lfs_dostats;
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

	if (lfs_dostats == 0)
		memset(&lfs_stats, 0, sizeof(lfs_stats));

	return (0);
}

static void
lfs_sysctl_setup(struct sysctllog **clog)
{
	int i;
	extern int lfs_writeindir, lfs_dostats, lfs_clean_vnhead,
		lfs_fs_pagetrip, lfs_ignore_lazy_sync;
#ifdef DEBUG
	extern int lfs_debug_log_subsys[DLOG_MAX];
	struct shortlong dlog_names[DLOG_MAX] = {	/* Must match lfs.h ! */
		{ "rollforward",   "Debug roll-forward code" },
		{ "alloc",	   "Debug inode allocation and free list" },
		{ "avail",	   "Debug space-available-now accounting" },
		{ "flush",	   "Debug flush triggers" },
		{ "lockedlist",	   "Debug locked list accounting" },
		{ "vnode_verbose", "Verbose per-vnode-written debugging" },
		{ "vnode",	   "Debug vnode use during segment write" },
		{ "segment",	   "Debug segment writing" },
		{ "seguse",	   "Debug segment used-bytes accounting" },
		{ "cleaner",	   "Debug cleaning routines" },
		{ "mount",	   "Debug mount/unmount routines" },
		{ "pagecache",	   "Debug UBC interactions" },
		{ "dirop",	   "Debug directory-operation accounting" },
		{ "malloc",	   "Debug private malloc accounting" },
	};
#endif /* DEBUG */
	struct shortlong stat_names[] = {	/* Must match lfs.h! */
		{ "segsused",	    "Number of new segments allocated" },
		{ "psegwrites",	    "Number of partial-segment writes" },
		{ "psyncwrites",    "Number of synchronous partial-segment"
				    " writes" },
		{ "pcleanwrites",   "Number of partial-segment writes by the"
				    " cleaner" },
		{ "blocktot",	    "Number of blocks written" },
		{ "cleanblocks",    "Number of blocks written by the cleaner" },
		{ "ncheckpoints",   "Number of checkpoints made" },
		{ "nwrites",	    "Number of whole writes" },
		{ "nsync_writes",   "Number of synchronous writes" },
		{ "wait_exceeded",  "Number of times writer waited for"
				    " cleaner" },
		{ "write_exceeded", "Number of times writer invoked flush" },
		{ "flush_invoked",  "Number of times flush was invoked" },
		{ "vflush_invoked", "Number of times vflush was called" },
		{ "clean_inlocked", "Number of vnodes skipped for being dead" },
		{ "clean_vnlocked", "Number of vnodes skipped for vget failure" },
		{ "segs_reclaimed", "Number of segments reclaimed" },
	};
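
	/*
	 * Note: the loop over the LFS_STATS nodes below walks this
	 * table in parallel with the fields of struct lfs_stats
	 * (viewed as a flat array of u_int starting at segsused), so
	 * the order here must track the field order in lfs.h exactly.
	 */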

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "lfs",
		       SYSCTL_DESCR("Log-structured file system"),
		       NULL, 0, NULL, 0,
		       CTL_VFS, 5, CTL_EOL);
	/*
	 * XXX the "5" above could be dynamic, thereby eliminating one
	 * more instance of the "number to vfs" mapping problem, but
	 * "5" is the order as taken from sys/mount.h
	 */

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "flushindir", NULL,
		       NULL, 0, &lfs_writeindir, 0,
		       CTL_VFS, 5, LFS_WRITEINDIR, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "clean_vnhead", NULL,
		       NULL, 0, &lfs_clean_vnhead, 0,
		       CTL_VFS, 5, LFS_CLEAN_VNHEAD, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "dostats",
		       SYSCTL_DESCR("Maintain statistics on LFS operations"),
		       sysctl_lfs_dostats, 0, &lfs_dostats, 0,
		       CTL_VFS, 5, LFS_DOSTATS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "pagetrip",
		       SYSCTL_DESCR("How many dirty pages in fs triggers"
				    " a flush"),
		       NULL, 0, &lfs_fs_pagetrip, 0,
		       CTL_VFS, 5, LFS_FS_PAGETRIP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "ignore_lazy_sync",
		       SYSCTL_DESCR("Lazy Sync is ignored entirely"),
		       NULL, 0, &lfs_ignore_lazy_sync, 0,
		       CTL_VFS, 5, LFS_IGNORE_LAZY_SYNC, CTL_EOL);
#ifdef LFS_KERNEL_RFW
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "rfw",
		       SYSCTL_DESCR("Use in-kernel roll-forward on mount"),
		       NULL, 0, &lfs_do_rfw, 0,
		       CTL_VFS, 5, LFS_DO_RFW, CTL_EOL);
#endif

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "stats",
		       SYSCTL_DESCR("Statistics on LFS operations"),
		       NULL, 0, NULL, 0,
		       CTL_VFS, 5, LFS_STATS, CTL_EOL);
	for (i = 0; i < sizeof(struct lfs_stats) / sizeof(u_int); i++) {
		sysctl_createv(clog, 0, NULL, NULL,
			       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
			       CTLTYPE_INT, stat_names[i].sname,
			       SYSCTL_DESCR(stat_names[i].lname),
			       NULL, 0, &(((u_int *)&lfs_stats.segsused)[i]),
			       0, CTL_VFS, 5, LFS_STATS, i, CTL_EOL);
	}

#ifdef DEBUG
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "debug",
		       SYSCTL_DESCR("Debugging options"),
		       NULL, 0, NULL, 0,
		       CTL_VFS, 5, LFS_DEBUGLOG, CTL_EOL);
	for (i = 0; i < DLOG_MAX; i++) {
		sysctl_createv(clog, 0, NULL, NULL,
			       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			       CTLTYPE_INT, dlog_names[i].sname,
			       SYSCTL_DESCR(dlog_names[i].lname),
			       NULL, 0, &(lfs_debug_log_subsys[i]), 0,
			       CTL_VFS, 5, LFS_DEBUGLOG, i, CTL_EOL);
	}
#endif
}
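
/*
 * Illustrative userland usage of the tree built above (the numeric
 * node 5 corresponds to vfs.lfs, per the XXX comment in
 * lfs_sysctl_setup()):
 *
 *	sysctl -w vfs.lfs.dostats=1	# start keeping statistics
 *	sysctl vfs.lfs.stats		# dump the counters
 *	sysctl -w vfs.lfs.dostats=0	# stop, and clear the counters
 *
 * The clearing-on-zero behavior comes from sysctl_lfs_dostats() above.
 */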

/* old cleaner syscall interface.  see VOP_FCNTL() */
static const struct syscall_package lfs_syscalls[] = {
	{ SYS_lfs_bmapv,	0, (sy_call_t *)sys_lfs_bmapv },
	{ SYS_lfs_markv,	0, (sy_call_t *)sys_lfs_markv },
	{ SYS_lfs_segclean,	0, (sy_call_t *)sys_lfs_segclean },
	{ SYS___lfs_segwait50,	0, (sy_call_t *)sys___lfs_segwait50 },
	{ 0, 0, NULL },
};

static int
lfs_modcmd(modcmd_t cmd, void *arg)
{
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		error = syscall_establish(NULL, lfs_syscalls);
		if (error)
			return error;
		error = vfs_attach(&lfs_vfsops);
		if (error != 0) {
			syscall_disestablish(NULL, lfs_syscalls);
			break;
		}
		lfs_sysctl_setup(&lfs_sysctl_log);
		break;
	case MODULE_CMD_FINI:
		error = vfs_detach(&lfs_vfsops);
		if (error != 0)
			break;
		syscall_disestablish(NULL, lfs_syscalls);
		sysctl_teardown(&lfs_sysctl_log);
		break;
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

/*
 * XXX Same structure as FFS inodes?  Should we share a common pool?
 */
struct pool lfs_inode_pool;
struct pool lfs_dinode_pool;
struct pool lfs_inoext_pool;
struct pool lfs_lbnentry_pool;

/*
 * The writer daemon.  UVM keeps track of how many dirty pages we are holding
 * in lfs_subsys_pages; the daemon flushes the filesystem when this value
 * crosses the (user-defined) threshold LFS_MAX_PAGES.  As the loop below
 * shows, it flushes likewise when the locked buffer count or byte total
 * exceeds LFS_MAX_BUFS or LFS_MAX_BYTES, or when a flush has been requested
 * explicitly through lfs_do_flush.
 */
static void
lfs_writerd(void *arg)
{
	struct mount *mp, *nmp;
	struct lfs *fs;
	struct vfsops *vfs = NULL;
	int fsflags;
	int skipc;
	int lfsc;
	int wrote_something = 0;

	mutex_enter(&lfs_lock);
	lfs_writer_daemon = curproc->p_pid;
	lfs_writer_lid = curlwp->l_lid;
	mutex_exit(&lfs_lock);

	/* Take an extra reference to the LFS vfsops. */
	vfs = vfs_getopsbyname(MOUNT_LFS);

	mutex_enter(&lfs_lock);
	for (;;) {
		KASSERT(mutex_owned(&lfs_lock));
		if (wrote_something == 0)
			mtsleep(&lfs_writer_daemon, PVM, "lfswriter", hz/10 + 1,
			    &lfs_lock);

		KASSERT(mutex_owned(&lfs_lock));
		wrote_something = 0;

		/*
		 * If global state wants a flush, flush everything.
		 */
		if (lfs_do_flush || locked_queue_count > LFS_MAX_BUFS ||
		    locked_queue_bytes > LFS_MAX_BYTES ||
		    lfs_subsys_pages > LFS_MAX_PAGES) {

			if (lfs_do_flush) {
				DLOG((DLOG_FLUSH, "lfs_writerd: lfs_do_flush\n"));
			}
			if (locked_queue_count > LFS_MAX_BUFS) {
				DLOG((DLOG_FLUSH, "lfs_writerd: lqc = %d, max %d\n",
				      locked_queue_count, LFS_MAX_BUFS));
			}
			if (locked_queue_bytes > LFS_MAX_BYTES) {
				DLOG((DLOG_FLUSH, "lfs_writerd: lqb = %ld, max %ld\n",
				      locked_queue_bytes, LFS_MAX_BYTES));
			}
			if (lfs_subsys_pages > LFS_MAX_PAGES) {
				DLOG((DLOG_FLUSH, "lfs_writerd: lssp = %d, max %d\n",
				      lfs_subsys_pages, LFS_MAX_PAGES));
			}

			lfs_flush(NULL, SEGM_WRITERD, 0);
			lfs_do_flush = 0;
			KASSERT(mutex_owned(&lfs_lock));
			continue;
		}
		KASSERT(mutex_owned(&lfs_lock));
		mutex_exit(&lfs_lock);

		/*
		 * Look through the list of LFSs to see if any of them
		 * have requested pageouts.
		 */
		mutex_enter(&mountlist_lock);
		lfsc = 0;
		skipc = 0;
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, &nmp)) {
				++skipc;
				continue;
			}
			KASSERT(!mutex_owned(&lfs_lock));
			if (strncmp(mp->mnt_stat.f_fstypename, MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				++lfsc;
				fs = VFSTOULFS(mp)->um_lfs;
				int32_t ooffset = 0;
				fsflags = SEGM_SINGLE;

				mutex_enter(&lfs_lock);
				ooffset = lfs_sb_getoffset(fs);

				if (lfs_sb_getnextseg(fs) < lfs_sb_getcurseg(fs) && fs->lfs_nowrap) {
					/* Don't try to write if we're suspended */
					mutex_exit(&lfs_lock);
					vfs_unbusy(mp, false, &nmp);
					continue;
				}
				if (LFS_STARVED_FOR_SEGS(fs)) {
					mutex_exit(&lfs_lock);

					DLOG((DLOG_FLUSH, "lfs_writerd: need cleaning before writing possible\n"));
					lfs_wakeup_cleaner(fs);
					vfs_unbusy(mp, false, &nmp);
					continue;
				}

				if ((fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
				     lfs_dirvcount > LFS_MAX_DIROP) &&
				    fs->lfs_dirops == 0) {
					fsflags &= ~SEGM_SINGLE;
					fsflags |= SEGM_CKP;
					DLOG((DLOG_FLUSH, "lfs_writerd: checkpoint\n"));
					lfs_flush_fs(fs, fsflags);
				} else if (fs->lfs_pdflush) {
					DLOG((DLOG_FLUSH, "lfs_writerd: pdflush set\n"));
					lfs_flush_fs(fs, fsflags);
				} else if (!TAILQ_EMPTY(&fs->lfs_pchainhd)) {
					DLOG((DLOG_FLUSH, "lfs_writerd: pchain non-empty\n"));
					mutex_exit(&lfs_lock);
					lfs_writer_enter(fs, "wrdirop");
					lfs_flush_pchain(fs);
					lfs_writer_leave(fs);
					mutex_enter(&lfs_lock);
				}
				if (lfs_sb_getoffset(fs) != ooffset)
					++wrote_something;
				mutex_exit(&lfs_lock);
			}
			KASSERT(!mutex_owned(&lfs_lock));
			vfs_unbusy(mp, false, &nmp);
		}
		if (lfsc + skipc == 0) {
			mutex_enter(&lfs_lock);
			lfs_writer_daemon = 0;
			lfs_writer_lid = 0;
			mutex_exit(&lfs_lock);
			mutex_exit(&mountlist_lock);
			break;
		}
		mutex_exit(&mountlist_lock);

		mutex_enter(&lfs_lock);
	}
	KASSERT(!mutex_owned(&lfs_lock));
	KASSERT(!mutex_owned(&mountlist_lock));

	/* Give up our extra reference so the module can be unloaded. */
	mutex_enter(&vfs_list_lock);
	if (vfs != NULL)
		vfs->vfs_refcount--;
	mutex_exit(&vfs_list_lock);

	/* Done! */
	kthread_exit(0);
}

/*
 * Initialize the filesystem, most work done by ulfs_init.
 */
void
lfs_init(void)
{

	malloc_type_attach(M_SEGMENT);
	pool_init(&lfs_inode_pool, sizeof(struct inode), 0, 0, 0,
	    "lfsinopl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&lfs_dinode_pool, sizeof(struct ulfs1_dinode), 0, 0, 0,
	    "lfsdinopl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&lfs_inoext_pool, sizeof(struct lfs_inode_ext), 8, 0, 0,
	    "lfsinoextpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&lfs_lbnentry_pool, sizeof(struct lbnentry), 0, 0, 0,
	    "lfslbnpool", &pool_allocator_nointr, IPL_NONE);
	ulfs_init();

#ifdef DEBUG
	memset(lfs_log, 0, sizeof(lfs_log));
#endif
	mutex_init(&lfs_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&locked_queue_cv, "lfsbuf");
	cv_init(&lfs_writing_cv, "lfsflush");
}

void
lfs_reinit(void)
{
	ulfs_reinit();
}

void
lfs_done(void)
{
	ulfs_done();
	mutex_destroy(&lfs_lock);
	cv_destroy(&locked_queue_cv);
	cv_destroy(&lfs_writing_cv);
	pool_destroy(&lfs_inode_pool);
	pool_destroy(&lfs_dinode_pool);
	pool_destroy(&lfs_inoext_pool);
	pool_destroy(&lfs_lbnentry_pool);
	malloc_type_detach(M_SEGMENT);
}

/*
 * Called by main() when ulfs is going to be mounted as root.
 */
int
lfs_mountroot(void)
{
	extern struct vnode *rootvp;
	struct lfs *fs = NULL;				/* LFS */
	struct mount *mp;
	struct lwp *l = curlwp;
	struct ulfsmount *ump;
	int error;

	if (device_class(root_device) != DV_DISK)
		return (ENODEV);

	if (rootdev == NODEV)
		return (ENODEV);
	if ((error = vfs_rootmountalloc(MOUNT_LFS, "root_device", &mp))) {
		vrele(rootvp);
		return (error);
	}
	if ((error = lfs_mountfs(rootvp, mp, l))) {
		vfs_unbusy(mp, false, NULL);
		vfs_destroy(mp);
		return (error);
	}
	mountlist_append(mp);
	ump = VFSTOULFS(mp);
	fs = ump->um_lfs;
	memset(fs->lfs_dlfs.dlfs_fsmnt, 0, sizeof(fs->lfs_dlfs.dlfs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->lfs_dlfs.dlfs_fsmnt,
	    sizeof(fs->lfs_dlfs.dlfs_fsmnt), 0);
	(void)lfs_statvfs(mp, &mp->mnt_stat);
	vfs_unbusy(mp, false, NULL);
	setrootfstime((time_t)lfs_sb_gettstamp(VFSTOULFS(mp)->um_lfs));
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
int
lfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;
	struct ulfs_args *args = data;
	struct ulfsmount *ump = NULL;
	struct lfs *fs = NULL;				/* LFS */
	int error = 0, update;
	mode_t accessmode;

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		ump = VFSTOULFS(mp);
		if (ump == NULL)
			return EIO;
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	update = mp->mnt_flag & MNT_UPDATE;

	/* Check arguments */
	if (args->fspec != NULL) {
		/*
		 * Look up the name and verify that it's sane.
		 */
		error = namei_simple_user(args->fspec,
		    NSM_FOLLOW_NOEMULROOT, &devvp);
		if (error != 0)
			return (error);

		if (!update) {
			/*
			 * Be sure this is a valid block device
			 */
			if (devvp->v_type != VBLK)
				error = ENOTBLK;
			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
				error = ENXIO;
		} else {
			/*
			 * Be sure we're still naming the same device
			 * used for our initial mount
			 */
			ump = VFSTOULFS(mp);
			if (devvp != ump->um_devvp) {
				if (devvp->v_rdev != ump->um_devvp->v_rdev)
					error = EINVAL;
				else {
					vrele(devvp);
					devvp = ump->um_devvp;
					vref(devvp);
				}
			}
		}
	} else {
		if (!update) {
			/* New mounts must have a filename for the device */
			return (EINVAL);
		} else {
			/* Use the extant mount */
			ump = VFSTOULFS(mp);
			devvp = ump->um_devvp;
			vref(devvp);
		}
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (error == 0) {
		accessmode = VREAD;
		if (update ?
		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
		    (mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
		    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
		    KAUTH_ARG(accessmode));
		VOP_UNLOCK(devvp);
	}

	if (error) {
		vrele(devvp);
		return (error);
	}

	if (!update) {
		int flags;

		if (mp->mnt_flag & MNT_RDONLY)
			flags = FREAD;
		else
			flags = FREAD|FWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(devvp, flags, FSCRED);
		VOP_UNLOCK(devvp);
		if (error)
			goto fail;
		error = lfs_mountfs(devvp, mp, l);		/* LFS */
		if (error) {
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			(void)VOP_CLOSE(devvp, flags, NOCRED);
			VOP_UNLOCK(devvp);
			goto fail;
		}

		ump = VFSTOULFS(mp);
		fs = ump->um_lfs;
	} else {
		/*
		 * Update the mount.
		 */

		/*
		 * The initial mount got a reference on this
		 * device, so drop the one obtained via
		 * namei(), above.
		 */
		vrele(devvp);

		ump = VFSTOULFS(mp);
		fs = ump->um_lfs;

		if (fs->lfs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/*
			 * Changing from read/write to read-only.
			 * XXX: shouldn't we sync here? or does vfs do that?
			 */
#ifdef LFS_QUOTA2
			/* XXX: quotas should remain on when readonly */
			if (fs->lfs_use_quota2) {
				error = lfsquota2_umount(mp, 0);
				if (error) {
					return error;
				}
			}
#endif
		}

		if (fs->lfs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
			/*
			 * Changing from read-only to read/write.
			 * Note in the superblocks that we're writing.
			 */

			/* XXX: quotas should have been on even if readonly */
			if (fs->lfs_use_quota2) {
#ifdef LFS_QUOTA2
				error = lfs_quota2_mount(mp);
#else
				uprintf("%s: no kernel support for this "
					"filesystem's quotas\n",
					mp->mnt_stat.f_mntonname);
				if (mp->mnt_flag & MNT_FORCE) {
					uprintf("%s: mounting anyway; "
						"fsck afterwards\n",
						mp->mnt_stat.f_mntonname);
				} else {
					error = EINVAL;
				}
#endif
				if (error) {
					return error;
				}
			}

			fs->lfs_ronly = 0;
			if (lfs_sb_getpflags(fs) & LFS_PF_CLEAN) {
				lfs_sb_setpflags(fs, lfs_sb_getpflags(fs) & ~LFS_PF_CLEAN);
				lfs_writesuper(fs, lfs_sb_getsboff(fs, 0));
				lfs_writesuper(fs, lfs_sb_getsboff(fs, 1));
			}
		}
		if (args->fspec == NULL)
			return EINVAL;
	}

	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
	if (error == 0)
		(void)strncpy(fs->lfs_dlfs.dlfs_fsmnt,
		    mp->mnt_stat.f_mntonname,
		    sizeof(fs->lfs_dlfs.dlfs_fsmnt));
	return error;

fail:
	vrele(devvp);
	return (error);
}

/*
 * Common code for mount and mountroot
 * LFS specific
 */
int
lfs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
{
	struct dlfs *tdfs, *dfs, *adfs;
	struct lfs *fs;
	struct ulfsmount *ump;
	struct vnode *vp;
	struct buf *bp, *abp;
	dev_t dev;
	int error, i, ronly, fsbsize;
	kauth_cred_t cred;
	CLEANERINFO *cip;
	SEGUSE *sup;
	daddr_t sb_addr;

	cred = l ? l->l_cred : NOCRED;

	/*
	 * Flush out any old buffers remaining from a previous use.
	 */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	/* Don't free random space on error. */
	bp = NULL;
	abp = NULL;
	ump = NULL;

	sb_addr = LFS_LABELPAD / DEV_BSIZE;
	while (1) {
		/* Read in the superblock. */
		error = bread(devvp, sb_addr, LFS_SBPAD, 0, &bp);
		if (error)
			goto out;
		dfs = (struct dlfs *)bp->b_data;

		/* Check the basics. */
		if (dfs->dlfs_magic != LFS_MAGIC || dfs->dlfs_bsize > MAXBSIZE ||
		    dfs->dlfs_version > LFS_VERSION ||
		    dfs->dlfs_bsize < sizeof(struct dlfs)) {
			DLOG((DLOG_MOUNT, "lfs_mountfs: primary superblock sanity failed\n"));
			error = EINVAL;		/* XXX needs translation */
			goto out;
		}
		if (dfs->dlfs_inodefmt > LFS_MAXINODEFMT) {
			DLOG((DLOG_MOUNT, "lfs_mountfs: unknown inode format %d\n",
			      dfs->dlfs_inodefmt));
			error = EINVAL;
			goto out;
		}

		if (dfs->dlfs_version == 1)
			fsbsize = DEV_BSIZE;
		else {
			fsbsize = 1 << dfs->dlfs_ffshift;
			/*
			 * Could be, if the frag size is large enough, that we
			 * don't have the "real" primary superblock.  If that's
			 * the case, get the real one, and try again.
			 */
			if (sb_addr != (dfs->dlfs_sboffs[0] << (dfs->dlfs_ffshift - DEV_BSHIFT))) {
				DLOG((DLOG_MOUNT, "lfs_mountfs: sb daddr"
				      " 0x%llx is not right, trying 0x%llx\n",
				      (long long)sb_addr,
				      (long long)(dfs->dlfs_sboffs[0] << (dfs->dlfs_ffshift - DEV_BSHIFT))));
				sb_addr = dfs->dlfs_sboffs[0] << (dfs->dlfs_ffshift - DEV_BSHIFT);
				brelse(bp, 0);
				continue;
			}
		}
		break;
	}
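
	/*
	 * A worked example of the probe above, assuming LFS_LABELPAD of
	 * 8192 and DEV_BSIZE of 512: the first read lands at disk
	 * address 16.  On a v2 filesystem with 8 KB fragments
	 * (dlfs_ffshift == 13), dlfs_sboffs[0] is kept in fragment-sized
	 * units, so a stored value of 1 converts to 1 << (13 - 9) == 16
	 * and the probe succeeds on the first pass; any disagreement
	 * sends us around the loop once more with the corrected address.
	 */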

	/*
	 * Check the second superblock to see which is newer; then mount
	 * using the older of the two.  This is necessary to ensure that
	 * the filesystem is valid if it was not unmounted cleanly.
	 */

	if (dfs->dlfs_sboffs[1] &&
	    dfs->dlfs_sboffs[1] - LFS_LABELPAD / fsbsize > LFS_SBPAD / fsbsize)
	{
		error = bread(devvp, dfs->dlfs_sboffs[1] * (fsbsize / DEV_BSIZE),
			LFS_SBPAD, 0, &abp);
		if (error)
			goto out;
		adfs = (struct dlfs *)abp->b_data;

		if (dfs->dlfs_version == 1) {
			/* 1s resolution comparison */
			if (adfs->dlfs_tstamp < dfs->dlfs_tstamp)
				tdfs = adfs;
			else
				tdfs = dfs;
		} else {
			/* monotonic infinite-resolution comparison */
			if (adfs->dlfs_serial < dfs->dlfs_serial)
				tdfs = adfs;
			else
				tdfs = dfs;
		}

		/* Check the basics. */
		if (tdfs->dlfs_magic != LFS_MAGIC ||
		    tdfs->dlfs_bsize > MAXBSIZE ||
		    tdfs->dlfs_version > LFS_VERSION ||
		    tdfs->dlfs_bsize < sizeof(struct dlfs)) {
			DLOG((DLOG_MOUNT, "lfs_mountfs: alt superblock"
			      " sanity failed\n"));
			error = EINVAL;		/* XXX needs translation */
			goto out;
		}
	} else {
		DLOG((DLOG_MOUNT, "lfs_mountfs: invalid alt superblock"
		      " daddr=0x%x\n", dfs->dlfs_sboffs[1]));
		error = EINVAL;
		goto out;
	}

	/* Allocate the mount structure, copy the superblock into it. */
	fs = kmem_zalloc(sizeof(struct lfs), KM_SLEEP);
	memcpy(&fs->lfs_dlfs, tdfs, sizeof(struct dlfs));

	/* Compatibility */
	if (fs->lfs_version < 2) {
		lfs_sb_setsumsize(fs, LFS_V1_SUMMARY_SIZE);
		lfs_sb_setibsize(fs, lfs_sb_getbsize(fs));
		lfs_sb_sets0addr(fs, lfs_sb_getsboff(fs, 0));
		lfs_sb_settstamp(fs, lfs_sb_getotstamp(fs));
		lfs_sb_setfsbtodb(fs, 0);
	}
	if (lfs_sb_getresvseg(fs) == 0)
		lfs_sb_setresvseg(fs, MIN(lfs_sb_getminfreeseg(fs) - 1, \
			MAX(MIN_RESV_SEGS, lfs_sb_getminfreeseg(fs) / 2 + 1)));
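
	/*
	 * Illustrative arithmetic for the default just computed: with,
	 * say, lfs_minfreeseg == 10, the fallback reserves
	 * MIN(9, MAX(MIN_RESV_SEGS, 6)) segments for the cleaner, i.e.
	 * roughly half the minimum-free count, but never all of it and
	 * never fewer than MIN_RESV_SEGS.
	 */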

	/*
	 * If we aren't going to be able to write meaningfully to this
	 * filesystem, and were not mounted readonly, bomb out now.
	 */
	if (lfs_fsbtob(fs, LFS_NRESERVE(fs)) > LFS_MAX_BYTES && !ronly) {
		DLOG((DLOG_MOUNT, "lfs_mount: to mount this filesystem read/write,"
		      " we need BUFPAGES >= %lld\n",
		      (long long)((bufmem_hiwater / bufmem_lowater) *
				  LFS_INVERSE_MAX_BYTES(
					  lfs_fsbtob(fs, LFS_NRESERVE(fs))) >> PAGE_SHIFT)));
		kmem_free(fs, sizeof(struct lfs));
		error = EFBIG;		/* XXX needs translation */
		goto out;
	}

	/* Before rolling forward, lock so vget will sleep for other procs */
	if (l != NULL) {
		fs->lfs_flags = LFS_NOTYET;
		fs->lfs_rfpid = l->l_proc->p_pid;
	}

	ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
	ump->um_lfs = fs;
	ump->um_fstype = ULFS1;
	/* ump->um_cleaner_thread = NULL; */
	if (sizeof(struct lfs) < LFS_SBPAD) {			/* XXX why? */
		brelse(bp, BC_INVAL);
		brelse(abp, BC_INVAL);
	} else {
		brelse(bp, 0);
		brelse(abp, 0);
	}
	bp = NULL;
	abp = NULL;

	/* Set up the I/O information */
	fs->lfs_devbsize = DEV_BSIZE;
	fs->lfs_iocount = 0;
	fs->lfs_diropwait = 0;
	fs->lfs_activesb = 0;
	lfs_sb_setuinodes(fs, 0);
	fs->lfs_ravail = 0;
	fs->lfs_favail = 0;
	fs->lfs_sbactive = 0;

	/* Set up the ifile and lock aflags */
	fs->lfs_doifile = 0;
	fs->lfs_writer = 0;
	fs->lfs_dirops = 0;
	fs->lfs_nadirop = 0;
	fs->lfs_seglock = 0;
	fs->lfs_pdflush = 0;
	fs->lfs_sleepers = 0;
	fs->lfs_pages = 0;
	rw_init(&fs->lfs_fraglock);
	rw_init(&fs->lfs_iflock);
	cv_init(&fs->lfs_stopcv, "lfsstop");

	/* Set the file system readonly/modify bits. */
	fs->lfs_ronly = ronly;
	if (ronly == 0)
		fs->lfs_fmod = 1;

	/* ulfs-level information */
	fs->um_flags = 0;
	fs->um_bptrtodb = lfs_sb_getffshift(fs) - DEV_BSHIFT;
	fs->um_seqinc = lfs_sb_getfrag(fs);
	fs->um_nindir = lfs_sb_getnindir(fs);
	fs->um_lognindir = ffs(lfs_sb_getnindir(fs)) - 1;
	fs->um_maxsymlinklen = lfs_sb_getmaxsymlinklen(fs);
	fs->um_dirblksiz = LFS_DIRBLKSIZ;
	fs->um_maxfilesize = lfs_sb_getmaxfilesize(fs);

	/* quota stuff */
	/* XXX: these need to come from the on-disk superblock to be used */
	fs->lfs_use_quota2 = 0;
	fs->lfs_quota_magic = 0;
	fs->lfs_quota_flags = 0;
	fs->lfs_quotaino[0] = 0;
	fs->lfs_quotaino[1] = 0;

	/* Initialize the mount structure. */
	dev = devvp->v_rdev;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_LFS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = LFS_MAXNAMLEN;
	mp->mnt_stat.f_iosize = lfs_sb_getbsize(fs);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_fs_bshift = lfs_sb_getbshift(fs);
	if (fs->um_maxsymlinklen > 0)
		mp->mnt_iflag |= IMNT_DTYPE;

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	for (i = 0; i < ULFS_MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	spec_node_setmountedfs(devvp, mp);

	/* Set up reserved memory for pageout */
	lfs_setup_resblks(fs);
	/* Set up vdirop tailq */
	TAILQ_INIT(&fs->lfs_dchainhd);
	/* and paging tailq */
	TAILQ_INIT(&fs->lfs_pchainhd);
	/* and delayed segment accounting for truncation list */
	LIST_INIT(&fs->lfs_segdhd);

	/*
	 * We use the ifile vnode for almost every operation.  Instead of
	 * retrieving it from the hash table each time we retrieve it here,
	 * artificially increment the reference count and keep a pointer
	 * to it in the incore copy of the superblock.
	 */
	if ((error = VFS_VGET(mp, LFS_IFILE_INUM, &vp)) != 0) {
		DLOG((DLOG_MOUNT, "lfs_mountfs: ifile vget failed, error=%d\n", error));
		goto out;
	}
	fs->lfs_ivnode = vp;
	vref(vp);

	/* Set up inode bitmap and order free list */
	lfs_order_freelist(fs);

	/* Set up segment usage flags for the autocleaner. */
	fs->lfs_nactive = 0;
	fs->lfs_suflags = malloc(2 * sizeof(u_int32_t *),
	    M_SEGMENT, M_WAITOK);
	fs->lfs_suflags[0] = malloc(lfs_sb_getnseg(fs) * sizeof(u_int32_t),
	    M_SEGMENT, M_WAITOK);
	fs->lfs_suflags[1] = malloc(lfs_sb_getnseg(fs) * sizeof(u_int32_t),
	    M_SEGMENT, M_WAITOK);
	memset(fs->lfs_suflags[1], 0, lfs_sb_getnseg(fs) * sizeof(u_int32_t));
	for (i = 0; i < lfs_sb_getnseg(fs); i++) {
		int changed;

		LFS_SEGENTRY(sup, fs, i, bp);
		changed = 0;
		if (!ronly) {
			if (sup->su_nbytes == 0 &&
			    !(sup->su_flags & SEGUSE_EMPTY)) {
				sup->su_flags |= SEGUSE_EMPTY;
				++changed;
			} else if (!(sup->su_nbytes == 0) &&
				   (sup->su_flags & SEGUSE_EMPTY)) {
				sup->su_flags &= ~SEGUSE_EMPTY;
				++changed;
			}
			if (sup->su_flags & (SEGUSE_ACTIVE|SEGUSE_INVAL)) {
				sup->su_flags &= ~(SEGUSE_ACTIVE|SEGUSE_INVAL);
				++changed;
			}
		}
		fs->lfs_suflags[0][i] = sup->su_flags;
		if (changed)
			LFS_WRITESEGENTRY(sup, fs, i, bp);
		else
			brelse(bp, 0);
	}

	/*
	 * XXX: if the fs has quotas, quotas should be on even if
	 * readonly. Otherwise you can't query the quota info!
	 * However, that's not how the quota2 code got written and I
	 * don't know if it'll behave itself if enabled while
	 * readonly, so for now use the same enable logic as ffs.
	 *
	 * XXX: also, if you use the -f behavior allowed here (and
	 * equivalently above for remount) it will corrupt the fs. It
	 * ought not to allow that. It should allow mounting readonly
	 * if there are quotas and the kernel doesn't have the quota
	 * code, but only readonly.
	 *
	 * XXX: and if you use the -f behavior allowed here it will
	 * likely crash at unmount time (or remount time) because we
	 * think quotas are active.
	 *
	 * Although none of this applies until there's a way to set
	 * lfs_use_quota2 and have quotas in the fs at all.
	 */
	if (!ronly && fs->lfs_use_quota2) {
#ifdef LFS_QUOTA2
		error = lfs_quota2_mount(mp);
#else
		uprintf("%s: no kernel support for this filesystem's quotas\n",
			mp->mnt_stat.f_mntonname);
		if (mp->mnt_flag & MNT_FORCE) {
			uprintf("%s: mounting anyway; fsck afterwards\n",
				mp->mnt_stat.f_mntonname);
		} else {
			error = EINVAL;
		}
#endif
		if (error) {
			/* XXX XXX must clean up the stuff immediately above */
			printf("lfs_mountfs: sorry, leaking some memory\n");
			goto out;
		}
	}

#ifdef LFS_EXTATTR
	/*
	 * Initialize file-backed extended attributes for ULFS1 file
	 * systems.
	 *
	 * XXX: why is this limited to ULFS1?
	 */
	if (ump->um_fstype == ULFS1) {
		ulfs_extattr_uepm_init(&ump->um_extattr);
	}
#endif

#ifdef LFS_KERNEL_RFW
	lfs_roll_forward(fs, mp, l);
#endif

	/* If writing, sb is not clean; record in case of immediate crash */
	if (!fs->lfs_ronly) {
		lfs_sb_setpflags(fs, lfs_sb_getpflags(fs) & ~LFS_PF_CLEAN);
		lfs_writesuper(fs, lfs_sb_getsboff(fs, 0));
		lfs_writesuper(fs, lfs_sb_getsboff(fs, 1));
	}

	/* Allow vget now that roll-forward is complete */
	fs->lfs_flags &= ~(LFS_NOTYET);
	wakeup(&fs->lfs_flags);

	/*
	 * Initialize the ifile cleaner info with information from
	 * the superblock.
	 */
	LFS_CLEANERINFO(cip, fs, bp);
	cip->clean = lfs_sb_getnclean(fs);
	cip->dirty = lfs_sb_getnseg(fs) - lfs_sb_getnclean(fs);
	cip->avail = lfs_sb_getavail(fs);
	cip->bfree = lfs_sb_getbfree(fs);
	(void) LFS_BWRITE_LOG(bp); /* Ifile */

	/*
	 * Mark the current segment as ACTIVE, since we're going to
	 * be writing to it.
	 */
	LFS_SEGENTRY(sup, fs, lfs_dtosn(fs, lfs_sb_getoffset(fs)), bp);
	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
	fs->lfs_nactive++;
	LFS_WRITESEGENTRY(sup, fs, lfs_dtosn(fs, lfs_sb_getoffset(fs)), bp); /* Ifile */

	/* Now that roll-forward is done, unlock the Ifile */
	vput(vp);

	/* Start the pagedaemon-anticipating daemon */
	mutex_enter(&lfs_lock);
	if (lfs_writer_daemon == 0 && lfs_writer_lid == 0 &&
	    kthread_create(PRI_BIO, 0, NULL,
	    lfs_writerd, NULL, NULL, "lfs_writer") != 0)
		panic("fork lfs_writer");
	mutex_exit(&lfs_lock);

	printf("WARNING: the log-structured file system is experimental\n"
	    "WARNING: it may cause system crashes and/or corrupt data\n");

	return (0);

out:
	if (bp)
		brelse(bp, 0);
	if (abp)
		brelse(abp, 0);
	if (ump) {
		kmem_free(ump->um_lfs, sizeof(struct lfs));
		kmem_free(ump, sizeof(*ump));
		mp->mnt_data = NULL;
	}

	return (error);
}

/*
 * unmount system call
 */
int
lfs_unmount(struct mount *mp, int mntflags)
{
	struct lwp *l = curlwp;
	struct ulfsmount *ump;
	struct lfs *fs;
	int error, flags, ronly;
	vnode_t *vp;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	ump = VFSTOULFS(mp);
	fs = ump->um_lfs;

	/* Two checkpoints */
	lfs_segwrite(mp, SEGM_CKP | SEGM_SYNC);
	lfs_segwrite(mp, SEGM_CKP | SEGM_SYNC);

	/* wake up the cleaner so it can die */
	/* XXX: shouldn't this be *after* the error cases below? */
	lfs_wakeup_cleaner(fs);
	mutex_enter(&lfs_lock);
	while (fs->lfs_sleepers)
		mtsleep(&fs->lfs_sleepers, PRIBIO + 1, "lfs_sleepers", 0,
		    &lfs_lock);
	mutex_exit(&lfs_lock);

#ifdef LFS_EXTATTR
	if (ump->um_fstype == ULFS1) {
		if (ump->um_extattr.uepm_flags & ULFS_EXTATTR_UEPM_STARTED) {
			ulfs_extattr_stop(mp, curlwp);
		}
		if (ump->um_extattr.uepm_flags & ULFS_EXTATTR_UEPM_INITIALIZED) {
			ulfs_extattr_uepm_destroy(&ump->um_extattr);
		}
	}
#endif
#ifdef LFS_QUOTA
	if ((error = lfsquota1_umount(mp, flags)) != 0)
		return (error);
#endif
#ifdef LFS_QUOTA2
	if ((error = lfsquota2_umount(mp, flags)) != 0)
		return (error);
#endif
	if ((error = vflush(mp, fs->lfs_ivnode, flags)) != 0)
		return (error);
	if ((error = VFS_SYNC(mp, 1, l->l_cred)) != 0)
		return (error);
	vp = fs->lfs_ivnode;
	mutex_enter(vp->v_interlock);
	if (LIST_FIRST(&vp->v_dirtyblkhd))
		panic("lfs_unmount: still dirty blocks on ifile vnode");
	mutex_exit(vp->v_interlock);

	/* Explicitly write the superblock, to update serial and pflags */
	lfs_sb_setpflags(fs, lfs_sb_getpflags(fs) | LFS_PF_CLEAN);
	lfs_writesuper(fs, lfs_sb_getsboff(fs, 0));
	lfs_writesuper(fs, lfs_sb_getsboff(fs, 1));
	mutex_enter(&lfs_lock);
	while (fs->lfs_iocount)
		mtsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs_umount", 0,
		    &lfs_lock);
	mutex_exit(&lfs_lock);

	/* Finish with the Ifile, now that we're done with it */
	vgone(fs->lfs_ivnode);

	/* Close the device with the same flags it was opened with. */
	ronly = fs->lfs_ronly;
	if (ump->um_devvp->v_type != VBAD)
		spec_node_setmountedfs(ump->um_devvp, NULL);
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(ump->um_devvp,
	    ronly ? FREAD : FREAD|FWRITE, NOCRED);
	vput(ump->um_devvp);

	/* Complain about page leakage */
	if (fs->lfs_pages > 0)
		printf("lfs_unmount: still claim %d pages (%d in subsystem)\n",
			fs->lfs_pages, lfs_subsys_pages);

	/* Free per-mount data structures */
	free(fs->lfs_ino_bitmap, M_SEGMENT);
	free(fs->lfs_suflags[0], M_SEGMENT);
	free(fs->lfs_suflags[1], M_SEGMENT);
	free(fs->lfs_suflags, M_SEGMENT);
	lfs_free_resblks(fs);
	cv_destroy(&fs->lfs_stopcv);
	rw_destroy(&fs->lfs_fraglock);
	rw_destroy(&fs->lfs_iflock);

	kmem_free(fs, sizeof(struct lfs));
	kmem_free(ump, sizeof(*ump));

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Get file system statistics.
 *
 * NB: We don't lock to access the superblock here, because it's not
 * really that important if we get it wrong.
 */
int
lfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	struct lfs *fs;
	struct ulfsmount *ump;

	ump = VFSTOULFS(mp);
	fs = ump->um_lfs;
	if (fs->lfs_magic != LFS_MAGIC)
		panic("lfs_statvfs: magic");

	sbp->f_bsize = lfs_sb_getbsize(fs);
	sbp->f_frsize = lfs_sb_getfsize(fs);
	sbp->f_iosize = lfs_sb_getbsize(fs);
	sbp->f_blocks = LFS_EST_NONMETA(fs) - VTOI(fs->lfs_ivnode)->i_lfs_effnblks;

	sbp->f_bfree = LFS_EST_BFREE(fs);
	KASSERT(sbp->f_bfree <= lfs_sb_getdsize(fs));
#if 0
	if (sbp->f_bfree < 0)
		sbp->f_bfree = 0;
#endif

	sbp->f_bresvd = LFS_EST_RSVD(fs);
	if (sbp->f_bfree > sbp->f_bresvd)
		sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
	else
		sbp->f_bavail = 0;

	sbp->f_files = lfs_sb_getbfree(fs) / lfs_btofsb(fs, lfs_sb_getibsize(fs))
	    * LFS_INOPB(fs);
	sbp->f_ffree = sbp->f_files - lfs_sb_getnfiles(fs);
	sbp->f_favail = sbp->f_ffree;
	sbp->f_fresvd = 0;
	copy_statvfs_info(sbp, mp);
	return (0);
}
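
/*
 * A sketch of the f_files estimate above, assuming 128-byte ULFS1
 * dinodes and 8 KB inode blocks: each inode block then holds
 * LFS_INOPB(fs) == 64 dinodes, so every free inode-block's worth of
 * space counts as 64 potential inodes.  Since LFS allocates inodes
 * dynamically out of the log, this is only an estimate, consistent
 * with the no-locking caveat in the function comment.
 */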

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
lfs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	int error;
	struct lfs *fs;

	fs = VFSTOULFS(mp)->um_lfs;
	if (fs->lfs_ronly)
		return 0;

	/* Snapshots should not hose the syncer */
	/*
	 * XXX Sync can block here anyway, since we don't have a very
	 * XXX good idea of how much data is pending.  If it's more
	 * XXX than a segment and lfs_nextseg is close to the end of
	 * XXX the log, we'll likely block.
	 */
	mutex_enter(&lfs_lock);
	if (fs->lfs_nowrap && lfs_sb_getnextseg(fs) < lfs_sb_getcurseg(fs)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	lfs_writer_enter(fs, "lfs_dirops");

	/* All syncs must be checkpoints until roll-forward is implemented. */
	DLOG((DLOG_FLUSH, "lfs_sync at 0x%jx\n",
	      (uintmax_t)lfs_sb_getoffset(fs)));
	error = lfs_segwrite(mp, SEGM_CKP | (waitfor ? SEGM_SYNC : 0));
	lfs_writer_leave(fs);
#ifdef LFS_QUOTA
	lfs_qsync(mp);
#endif
	return (error);
}

/*
 * Look up an LFS dinode number to find its incore vnode.  If not already
 * in core, read it in from the specified device.  Return the inode locked.
 * Detection and handling of mount points must be done by the calling routine.
 */
int
lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	int error;

	error = vcache_get(mp, &ino, sizeof(ino), vpp);
	if (error)
		return error;
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}

	return 0;
}

/*
 * Create a new vnode/inode pair and initialize what fields we can.
 */
static void
lfs_init_vnode(struct ulfsmount *ump, ino_t ino, struct vnode *vp)
{
	struct inode *ip;
	struct ulfs1_dinode *dp;

	ASSERT_NO_SEGLOCK(ump->um_lfs);

	/* Initialize the inode. */
	ip = pool_get(&lfs_inode_pool, PR_WAITOK);
	memset(ip, 0, sizeof(*ip));
	dp = pool_get(&lfs_dinode_pool, PR_WAITOK);
	memset(dp, 0, sizeof(*dp));
	ip->inode_ext.lfs = pool_get(&lfs_inoext_pool, PR_WAITOK);
	memset(ip->inode_ext.lfs, 0, sizeof(*ip->inode_ext.lfs));
	ip->i_din.ffs1_din = dp;
	ip->i_ump = ump;
	ip->i_vnode = vp;
	ip->i_dev = ump->um_dev;
	ip->i_number = dp->di_inumber = ino;
	ip->i_lfs = ump->um_lfs;
	ip->i_lfs_effnblks = 0;
	SPLAY_INIT(&ip->i_lfs_lbtree);
	ip->i_lfs_nbtree = 0;
	LIST_INIT(&ip->i_lfs_segdhd);

	vp->v_tag = VT_LFS;
	vp->v_op = lfs_vnodeop_p;
	vp->v_data = ip;
}

/*
 * Undo lfs_init_vnode().
 */
static void
lfs_deinit_vnode(struct ulfsmount *ump, struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
	pool_put(&lfs_dinode_pool, ip->i_din.ffs1_din);
	pool_put(&lfs_inode_pool, ip);
	vp->v_data = NULL;
}

/*
 * Read an inode from disk and initialize this vnode / inode pair.
 * Caller assures no other thread will try to load this inode.
 */
int
lfs_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct lfs *fs;
	struct ulfs1_dinode *dip;
	struct inode *ip;
	struct buf *bp;
	struct ifile *ifp;
	struct ulfsmount *ump;
	ino_t ino;
	daddr_t daddr;
	int error, retries;
	struct timespec ts;

	KASSERT(key_len == sizeof(ino));
	memcpy(&ino, key, key_len);

	memset(&ts, 0, sizeof ts);	/* XXX gcc */

	ump = VFSTOULFS(mp);
	fs = ump->um_lfs;

	/*
	 * If the filesystem is not completely mounted yet, suspend
	 * any access requests (wait for roll-forward to complete).
	 */
	mutex_enter(&lfs_lock);
	while ((fs->lfs_flags & LFS_NOTYET) && curproc->p_pid != fs->lfs_rfpid)
		mtsleep(&fs->lfs_flags, PRIBIO+1, "lfs_notyet", 0,
		    &lfs_lock);
	mutex_exit(&lfs_lock);

	/* Translate the inode number to a disk address. */
	if (ino == LFS_IFILE_INUM)
		daddr = lfs_sb_getidaddr(fs);
	else {
		/* XXX bounds-check this too */
		LFS_IENTRY(ifp, fs, ino, bp);
		daddr = ifp->if_daddr;
		if (fs->lfs_version > 1) {
			ts.tv_sec = ifp->if_atime_sec;
			ts.tv_nsec = ifp->if_atime_nsec;
		}

		brelse(bp, 0);
		if (daddr == LFS_UNUSED_DADDR)
			return (ENOENT);
	}

	/* Allocate/init new vnode/inode. */
	lfs_init_vnode(ump, ino, vp);
	ip = VTOI(vp);

	/* If the cleaner supplied the inode, use it. */
	if (curlwp == ump->um_cleaner_thread && ump->um_cleaner_hint != NULL &&
	    ump->um_cleaner_hint->bi_lbn == LFS_UNUSED_LBN) {
		dip = ump->um_cleaner_hint->bi_bp;
		error = copyin(dip, ip->i_din.ffs1_din,
		    sizeof(struct ulfs1_dinode));
		if (error) {
			lfs_deinit_vnode(ump, vp);
			return error;
		}
		KASSERT(ip->i_number == ino);
		goto out;
	}

	/* Read in the disk contents for the inode, copy into the inode. */
	retries = 0;
again:
	error = bread(ump->um_devvp, LFS_FSBTODB(fs, daddr),
	    (fs->lfs_version == 1 ? lfs_sb_getbsize(fs) : lfs_sb_getibsize(fs)),
	    0, &bp);
	if (error) {
		lfs_deinit_vnode(ump, vp);
		return error;
	}

	dip = lfs_ifind(fs, ino, bp);
	if (dip == NULL) {
		/* Assume write has not completed yet; try again */
		brelse(bp, BC_INVAL);
		++retries;
		if (retries <= LFS_IFIND_RETRIES) {
			mutex_enter(&lfs_lock);
			if (fs->lfs_iocount) {
				DLOG((DLOG_VNODE,
				    "%s: dinode %ju not found, retrying...\n",
				    __func__, (uintmax_t)ino));
				(void)mtsleep(&fs->lfs_iocount, PRIBIO + 1,
				    "lfs ifind", 1, &lfs_lock);
			} else
				retries = LFS_IFIND_RETRIES;
			mutex_exit(&lfs_lock);
			goto again;
		}
#ifdef DEBUG
		/* If the seglock is held look at the bpp to see
		   what is there anyway */
		mutex_enter(&lfs_lock);
		if (fs->lfs_seglock > 0) {
			struct buf **bpp;
			struct ulfs1_dinode *dp;
			int i;

			for (bpp = fs->lfs_sp->bpp;
			     bpp != fs->lfs_sp->cbpp; ++bpp) {
				if ((*bpp)->b_vp == fs->lfs_ivnode &&
				    bpp != fs->lfs_sp->bpp) {
					/* Inode block */
					printf("%s: block 0x%" PRIx64 ": ",
					    __func__, (*bpp)->b_blkno);
					dp = (struct ulfs1_dinode *)
					    (*bpp)->b_data;
					for (i = 0; i < LFS_INOPB(fs); i++)
						if (dp[i].di_inumber)
							printf("%d ",
							    dp[i].di_inumber);
					printf("\n");
				}
			}
		}
		mutex_exit(&lfs_lock);
#endif /* DEBUG */
		panic("lfs_loadvnode: dinode not found");
	}
	*ip->i_din.ffs1_din = *dip;
	brelse(bp, 0);

out:
	if (fs->lfs_version > 1) {
		ip->i_ffs1_atime = ts.tv_sec;
		ip->i_ffs1_atimensec = ts.tv_nsec;
	}

	lfs_vinit(mp, &vp);

	*new_key = &ip->i_number;
	return 0;
}

/*
 * Create a new inode and initialize this vnode / inode pair.
 */
int
lfs_newvnode(struct mount *mp, struct vnode *dvp, struct vnode *vp,
    struct vattr *vap, kauth_cred_t cred,
    size_t *key_len, const void **new_key)
{
	ino_t ino;
	struct inode *ip;
	struct ulfsmount *ump;
	struct lfs *fs;
	int error, mode, gen;

	KASSERT(dvp != NULL || vap->va_fileid > 0);
	KASSERT(dvp == NULL || dvp->v_mount == mp);
	KASSERT(vap->va_type != VNON);

	*key_len = sizeof(ino);
	ump = VFSTOULFS(mp);
	fs = ump->um_lfs;
	mode = MAKEIMODE(vap->va_type, vap->va_mode);

	/*
	 * Allocate fresh inode.  With "dvp == NULL" take the inode number
	 * and version from "vap".
	 */
	if (dvp == NULL) {
		ino = vap->va_fileid;
		gen = vap->va_gen;
		error = lfs_valloc_fixed(fs, ino, gen);
	} else {
		error = lfs_valloc(dvp, mode, cred, &ino, &gen);
	}
	if (error)
		return error;

	/* Attach inode to vnode. */
	lfs_init_vnode(ump, ino, vp);
	ip = VTOI(vp);

	mutex_enter(&lfs_lock);
	LFS_SET_UINO(ip, IN_CHANGE);
	mutex_exit(&lfs_lock);

	/* Note no blocks yet */
	ip->i_lfs_hiblk = -1;

	/* Set a new generation number for this inode. */
	ip->i_gen = gen;
	ip->i_ffs1_gen = gen;

	memset(ip->i_lfs_fragsize, 0,
	    ULFS_NDADDR * sizeof(*ip->i_lfs_fragsize));

	/* Set uid / gid. */
	if (cred == NOCRED || cred == FSCRED) {
		ip->i_gid = 0;
		ip->i_uid = 0;
	} else {
		ip->i_gid = VTOI(dvp)->i_gid;
		ip->i_uid = kauth_cred_geteuid(cred);
	}
	DIP_ASSIGN(ip, gid, ip->i_gid);
	DIP_ASSIGN(ip, uid, ip->i_uid);

#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	error = lfs_chkiq(ip, 1, cred, 0);
	if (error) {
		lfs_vfree(dvp, ino, mode);
		lfs_deinit_vnode(ump, vp);

		return error;
	}
#endif

	/* Set type and finalize. */
	ip->i_flags = 0;
	DIP_ASSIGN(ip, flags, 0);
	ip->i_mode = mode;
	DIP_ASSIGN(ip, mode, mode);
	if (vap->va_rdev != VNOVAL) {
		/*
		 * Want to be able to use this to make badblock
		 * inodes, so don't truncate the dev number.
		 */
		if (ump->um_fstype == ULFS1)
			ip->i_ffs1_rdev = ulfs_rw32(vap->va_rdev,
			    ULFS_MPNEEDSWAP(fs));
		else
			ip->i_ffs2_rdev = ulfs_rw64(vap->va_rdev,
			    ULFS_MPNEEDSWAP(fs));
	}
	lfs_vinit(mp, &vp);

	*new_key = &ip->i_number;
	return 0;
}

/*
 * File handle to vnode
 */
int
lfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct lfid lfh;
	struct lfs *fs;

	if (fhp->fid_len != sizeof(struct lfid))
		return EINVAL;

	memcpy(&lfh, fhp, sizeof(lfh));
	if (lfh.lfid_ino < LFS_IFILE_INUM)
		return ESTALE;

	fs = VFSTOULFS(mp)->um_lfs;
	if (lfh.lfid_ident != lfs_sb_getident(fs))
		return ESTALE;

	if (lfh.lfid_ino >
	    ((VTOI(fs->lfs_ivnode)->i_ffs1_size >> lfs_sb_getbshift(fs)) -
	     lfs_sb_getcleansz(fs) - lfs_sb_getsegtabsz(fs)) * lfs_sb_getifpb(fs))
		return ESTALE;

	return (ulfs_fhtovp(mp, &lfh.lfid_ufid, vpp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
lfs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
	struct inode *ip;
	struct lfid lfh;

	if (*fh_size < sizeof(struct lfid)) {
		*fh_size = sizeof(struct lfid);
		return E2BIG;
	}
	*fh_size = sizeof(struct lfid);
	ip = VTOI(vp);
	memset(&lfh, 0, sizeof(lfh));
	lfh.lfid_len = sizeof(struct lfid);
	lfh.lfid_ino = ip->i_number;
	lfh.lfid_gen = ip->i_gen;
	lfh.lfid_ident = lfs_sb_getident(ip->i_lfs);
	memcpy(fhp, &lfh, sizeof(lfh));
	return (0);
}

/*
 * ulfs_bmaparray callback function for writing.
 *
 * Since blocks will be written to the new segment anyway,
 * we don't care about current daddr of them.
 */
static bool
lfs_issequential_hole(const struct lfs *fs,
    daddr_t daddr0, daddr_t daddr1)
{
	(void)fs; /* not used */

	daddr0 = (daddr_t)((int32_t)daddr0); /* XXX ondisk32 */
	daddr1 = (daddr_t)((int32_t)daddr1); /* XXX ondisk32 */

	KASSERT(daddr0 == UNWRITTEN ||
	    (0 <= daddr0 && daddr0 <= LFS_MAX_DADDR));
	KASSERT(daddr1 == UNWRITTEN ||
	    (0 <= daddr1 && daddr1 <= LFS_MAX_DADDR));

	/* NOTE: all we want to know here is 'hole or not'. */
	/* NOTE: UNASSIGNED is converted to 0 by ulfs_bmaparray. */

	/*
	 * treat UNWRITTENs and all resident blocks as 'contiguous'
	 */
	if (daddr0 != 0 && daddr1 != 0)
		return true;

	/*
	 * both are in hole?
	 */
	if (daddr0 == 0 && daddr1 == 0)
		return true; /* all holes are 'contiguous' for us. */

	return false;
}
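
/*
 * Example of the predicate above: (daddr0, daddr1) == (UNWRITTEN, 0)
 * is a transition from an allocated-but-unwritten block into a hole,
 * so neither "both resident" nor "both holes" matches and we return
 * false, ending the contiguous run there.  Two UNWRITTEN blocks, or
 * two holes, count as contiguous.
 */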
1845
1846 /*
1847 * lfs_gop_write functions exactly like genfs_gop_write, except that
1848 * (1) it requires the seglock to be held by its caller, and sp->fip
1849 * to be properly initialized (it will return without re-initializing
1850 * sp->fip, and without calling lfs_writeseg).
1851 * (2) it uses the remaining space in the segment, rather than VOP_BMAP,
1852 * to determine how large a block it can write at once (though it does
1853 * still use VOP_BMAP to find holes in the file);
1854 * (3) it calls lfs_gatherblock instead of VOP_STRATEGY on its blocks
1855 * (leaving lfs_writeseg to deal with the cluster blocks, so we might
1856 * now have clusters of clusters, ick.)
1857 */
1858 static int
1859 lfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
1860 int flags)
1861 {
1862 int i, error, run, haveeof = 0;
1863 int fs_bshift;
1864 vaddr_t kva;
1865 off_t eof, offset, startoffset = 0;
1866 size_t bytes, iobytes, skipbytes;
1867 bool async = (flags & PGO_SYNCIO) == 0;
1868 daddr_t lbn, blkno;
1869 struct vm_page *pg;
1870 struct buf *mbp, *bp;
1871 struct vnode *devvp = VTOI(vp)->i_devvp;
1872 struct inode *ip = VTOI(vp);
1873 struct lfs *fs = ip->i_lfs;
1874 struct segment *sp = fs->lfs_sp;
1875 UVMHIST_FUNC("lfs_gop_write"); UVMHIST_CALLED(ubchist);
1876 const char * failreason = NULL;
1877
1878 ASSERT_SEGLOCK(fs);
1879
1880 /* The Ifile lives in the buffer cache */
1881 KASSERT(vp != fs->lfs_ivnode);
1882
1883 /*
1884 * We don't want to fill the disk before the cleaner has a chance
1885 * to make room for us. If we're in danger of doing that, fail
1886 * with EAGAIN. The caller will have to notice this, unlock
1887 * so the cleaner can run, relock and try again.
1888 *
1889 * We must write everything, however, if our vnode is being
1890 * reclaimed.
1891 */
1892 mutex_enter(vp->v_interlock);
1893 if (LFS_STARVED_FOR_SEGS(fs) && vdead_check(vp, VDEAD_NOWAIT) == 0) {
1894 mutex_exit(vp->v_interlock);
1895 failreason = "Starved for segs and not flushing vp";
1896 goto tryagain;
1897 }
1898 mutex_exit(vp->v_interlock);
1899
1900 /*
1901	 * Sometimes things slip past the filters in lfs_putpages,
1902	 * and the pagedaemon tries to write pages; the problem is
1903	 * that the pagedaemon never acquires the segment lock.
1904	 *
1905	 * Alternatively, pages that were clean when we called
1906	 * genfs_putpages may have become dirty in the meantime.  In this
1907	 * case the segment header is not properly set up for blocks
1908	 * to be added to it.
1909	 *
1910	 * Unbusy and unclean the pages, and put them on the ACTIVE
1911	 * queue under the hypothesis that they could not have gotten
1912	 * here unless they were modified *quite* recently.
1913	 *
1914	 * XXXUBC that last statement is an oversimplification, of course.
1915 */
1916 if (!LFS_SEGLOCK_HELD(fs)) {
1917 failreason = "Seglock not held";
1918 goto tryagain;
1919 }
1920 if (ip->i_lfs_iflags & LFSI_NO_GOP_WRITE) {
1921 failreason = "Inode with no_gop_write";
1922 goto tryagain;
1923 }
1924 if ((pgs[0]->offset & lfs_sb_getbmask(fs)) != 0) {
1925 failreason = "Bad page offset";
1926 goto tryagain;
1927 }
1928
1929 UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1930 vp, pgs, npages, flags);
1931
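	/* Find the current EOF; we never write whole pages past it. */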
1932 GOP_SIZE(vp, vp->v_size, &eof, 0);
1933 haveeof = 1;
1934
1935 if (vp->v_type == VREG)
1936 fs_bshift = vp->v_mount->mnt_fs_bshift;
1937 else
1938 fs_bshift = DEV_BSHIFT;
1939 error = 0;
1940 pg = pgs[0];
1941 startoffset = pg->offset;
1942 KASSERT(eof >= 0);
1943
1944 if (startoffset >= eof) {
1945 failreason = "Offset beyond EOF";
1946 goto tryagain;
1947 } else
1948 bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
1949 skipbytes = 0;
1950
1951 KASSERT(bytes != 0);
1952
1953 /* Swap PG_DELWRI for PG_PAGEOUT */
1954 for (i = 0; i < npages; i++) {
1955 if (pgs[i]->flags & PG_DELWRI) {
1956 KASSERT(!(pgs[i]->flags & PG_PAGEOUT));
1957 pgs[i]->flags &= ~PG_DELWRI;
1958 pgs[i]->flags |= PG_PAGEOUT;
1959 uvm_pageout_start(1);
1960 mutex_enter(vp->v_interlock);
1961 mutex_enter(&uvm_pageqlock);
1962 uvm_pageunwire(pgs[i]);
1963 mutex_exit(&uvm_pageqlock);
1964 mutex_exit(vp->v_interlock);
1965 }
1966 }
1967
1968 /*
1969 * Check to make sure we're starting on a block boundary.
1970 * We'll check later to make sure we always write entire
1971 * blocks (or fragments).
1972 */
1973 if (startoffset & lfs_sb_getbmask(fs))
1974 printf("%" PRId64 " & %" PRIu64 " = %" PRId64 "\n",
1975 startoffset, lfs_sb_getbmask(fs),
1976 startoffset & lfs_sb_getbmask(fs));
1977 KASSERT((startoffset & lfs_sb_getbmask(fs)) == 0);
1978 if (bytes & lfs_sb_getffmask(fs)) {
1979 printf("lfs_gop_write: asked to write %ld bytes\n", (long)bytes);
1980 panic("lfs_gop_write: non-integer blocks");
1981 }
1982
1983 /*
1984 * We could deadlock here on pager_map with UVMPAGER_MAPIN_WAITOK.
1985	 * If we would deadlock, write what we have and try again.  If we don't
1986 * have anything to write, we'll have to sleep.
1987 */
1988 if ((kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WRITE |
1989 (((SEGSUM *)(sp->segsum))->ss_nfinfo < 1 ?
1990 UVMPAGER_MAPIN_WAITOK : 0))) == 0x0) {
1991 DLOG((DLOG_PAGE, "lfs_gop_write: forcing write\n"));
1992 #if 0
1993 " with nfinfo=%d at offset 0x%jx\n",
1994 (int)((SEGSUM *)(sp->segsum))->ss_nfinfo,
1995 (uintmax_t)lfs_sb_getoffset(fs)));
1996 #endif
1997 lfs_updatemeta(sp);
1998 lfs_release_finfo(fs);
1999 (void) lfs_writeseg(fs, sp);
2000
2001 lfs_acquire_finfo(fs, ip->i_number, ip->i_gen);
2002
2003 /*
2004 * Having given up all of the pager_map we were holding,
2005 * we can now wait for aiodoned to reclaim it for us
2006 * without fear of deadlock.
2007 */
2008 kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WRITE |
2009 UVMPAGER_MAPIN_WAITOK);
2010 }
2011
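	/*
	 * Set up the master buffer covering the whole mapped range;
	 * the per-extent buffers created below are nested under it.
	 */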
2012 mbp = getiobuf(NULL, true);
2013 UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
2014 vp, mbp, vp->v_numoutput, bytes);
2015 mbp->b_bufsize = npages << PAGE_SHIFT;
2016 mbp->b_data = (void *)kva;
2017 mbp->b_resid = mbp->b_bcount = bytes;
2018 mbp->b_cflags = BC_BUSY|BC_AGE;
2019 mbp->b_iodone = uvm_aio_biodone;
2020
2021 bp = NULL;
2022 for (offset = startoffset;
2023 bytes > 0;
2024 offset += iobytes, bytes -= iobytes) {
2025 lbn = offset >> fs_bshift;
2026 error = ulfs_bmaparray(vp, lbn, &blkno, NULL, NULL, &run,
2027 lfs_issequential_hole);
2028 if (error) {
2029 UVMHIST_LOG(ubchist, "ulfs_bmaparray() -> %d",
2030 error,0,0,0);
2031 skipbytes += bytes;
2032 bytes = 0;
2033 break;
2034 }
2035
2036 iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
2037 bytes);
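		/* ulfs_bmaparray reports a hole as a daddr of -1; skip it. */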
2038 if (blkno == (daddr_t)-1) {
2039 skipbytes += iobytes;
2040 continue;
2041 }
2042
2043 /*
2044 * Discover how much we can really pack into this buffer.
2045 */
2046		/* If there's no room in the current segment, finish it up */
2047 if (sp->sum_bytes_left < sizeof(int32_t) ||
2048 sp->seg_bytes_left < (1 << lfs_sb_getbshift(fs))) {
2049 int vers;
2050
2051 lfs_updatemeta(sp);
2052 vers = sp->fip->fi_version;
2053 lfs_release_finfo(fs);
2054 (void) lfs_writeseg(fs, sp);
2055
2056 lfs_acquire_finfo(fs, ip->i_number, vers);
2057 }
2058 /* Check both for space in segment and space in segsum */
2059 iobytes = MIN(iobytes, (sp->seg_bytes_left >> fs_bshift)
2060 << fs_bshift);
2061 iobytes = MIN(iobytes, (sp->sum_bytes_left / sizeof(int32_t))
2062 << fs_bshift);
2063 KASSERT(iobytes > 0);
2064
2065 /* if it's really one i/o, don't make a second buf */
2066 if (offset == startoffset && iobytes == bytes) {
2067 bp = mbp;
2068 /*
2069 * All the LFS output is done by the segwriter. It
2070 * will increment numoutput by one for all the bufs it
2071			 * receives.  However, this buffer needs one extra to
2072 * account for aiodone.
2073 */
2074 mutex_enter(vp->v_interlock);
2075 vp->v_numoutput++;
2076 mutex_exit(vp->v_interlock);
2077 } else {
2078 bp = getiobuf(NULL, true);
2079 UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
2080 vp, bp, vp->v_numoutput, 0);
2081 nestiobuf_setup(mbp, bp, offset - pg->offset, iobytes);
2082 /*
2083			 * LFS doesn't like async I/O here; it dies with
2084			 * an assert in lfs_bwrite().  Is that assert
2085			 * valid?  I retained the non-async behaviour when
2086			 * I converted this to use nestiobuf. --pooka
2087 */
2088 bp->b_flags &= ~B_ASYNC;
2089 }
2090
2091 /* XXX This is silly ... is this necessary? */
2092 mutex_enter(&bufcache_lock);
2093 mutex_enter(vp->v_interlock);
2094 bgetvp(vp, bp);
2095 mutex_exit(vp->v_interlock);
2096 mutex_exit(&bufcache_lock);
2097
2098 bp->b_lblkno = lfs_lblkno(fs, offset);
2099 bp->b_private = mbp;
2100 if (devvp->v_type == VBLK) {
2101 bp->b_dev = devvp->v_rdev;
2102 }
2103 VOP_BWRITE(bp->b_vp, bp);
2104 while (lfs_gatherblock(sp, bp, NULL))
2105 continue;
2106 }
2107
2108 nestiobuf_done(mbp, skipbytes, error);
2109 if (skipbytes) {
2110 UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
2111 }
2112 UVMHIST_LOG(ubchist, "returning 0", 0,0,0,0);
2113
2114 if (!async) {
2115 /* Start a segment write. */
2116 UVMHIST_LOG(ubchist, "flushing", 0,0,0,0);
2117 mutex_enter(&lfs_lock);
2118 lfs_flush(fs, 0, 1);
2119 mutex_exit(&lfs_lock);
2120 }
2121
2122 if ((sp->seg_flags & SEGM_SINGLE) && lfs_sb_getcurseg(fs) != fs->lfs_startseg)
2123 return EAGAIN;
2124
2125 return (0);
2126
2127 tryagain:
2128 /*
2129 * We can't write the pages, for whatever reason.
2130 * Clean up after ourselves, and make the caller try again.
2131 */
2132 mutex_enter(vp->v_interlock);
2133
2134 /* Tell why we're here, if we know */
2135 if (failreason != NULL) {
2136 DLOG((DLOG_PAGE, "lfs_gop_write: %s\n", failreason));
2137 }
2138 if (haveeof && startoffset >= eof) {
2139 DLOG((DLOG_PAGE, "lfs_gop_write: ino %d start 0x%" PRIx64
2140 " eof 0x%" PRIx64 " npages=%d\n", VTOI(vp)->i_number,
2141 pgs[0]->offset, eof, npages));
2142 }
2143
2144 mutex_enter(&uvm_pageqlock);
2145 for (i = 0; i < npages; i++) {
2146 pg = pgs[i];
2147
2148 if (pg->flags & PG_PAGEOUT)
2149 uvm_pageout_done(1);
2150 if (pg->flags & PG_DELWRI) {
2151 uvm_pageunwire(pg);
2152 }
2153 uvm_pageactivate(pg);
2154 pg->flags &= ~(PG_CLEAN|PG_DELWRI|PG_PAGEOUT|PG_RELEASED);
2155 DLOG((DLOG_PAGE, "pg[%d] = %p (vp %p off %" PRIx64 ")\n", i, pg,
2156 vp, pg->offset));
2157 DLOG((DLOG_PAGE, "pg[%d]->flags = %x\n", i, pg->flags));
2158 DLOG((DLOG_PAGE, "pg[%d]->pqflags = %x\n", i, pg->pqflags));
2159 DLOG((DLOG_PAGE, "pg[%d]->uanon = %p\n", i, pg->uanon));
2160 DLOG((DLOG_PAGE, "pg[%d]->uobject = %p\n", i, pg->uobject));
2161 DLOG((DLOG_PAGE, "pg[%d]->wire_count = %d\n", i,
2162 pg->wire_count));
2163 DLOG((DLOG_PAGE, "pg[%d]->loan_count = %d\n", i,
2164 pg->loan_count));
2165 }
2166	/* uvm_page_unbusy takes care of PG_BUSY, PG_WANTED */
2167 uvm_page_unbusy(pgs, npages);
2168 mutex_exit(&uvm_pageqlock);
2169 mutex_exit(vp->v_interlock);
2170 return EAGAIN;
2171 }
2172
2173 /*
2174 * finish vnode/inode initialization.
2175 * used by lfs_vget.
2176 */
2177 void
2178 lfs_vinit(struct mount *mp, struct vnode **vpp)
2179 {
2180 struct vnode *vp = *vpp;
2181 struct inode *ip = VTOI(vp);
2182 struct ulfsmount *ump = VFSTOULFS(mp);
2183 struct lfs *fs = ump->um_lfs;
2184 int i;
2185
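	/* Copy fields from the on-disk (ffs1) dinode into the in-core inode. */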
2186 ip->i_mode = ip->i_ffs1_mode;
2187 ip->i_nlink = ip->i_ffs1_nlink;
2188 ip->i_lfs_osize = ip->i_size = ip->i_ffs1_size;
2189 ip->i_flags = ip->i_ffs1_flags;
2190 ip->i_gen = ip->i_ffs1_gen;
2191 ip->i_uid = ip->i_ffs1_uid;
2192 ip->i_gid = ip->i_ffs1_gid;
2193
2194 ip->i_lfs_effnblks = ip->i_ffs1_blocks;
2195 ip->i_lfs_odnlink = ip->i_ffs1_nlink;
2196
2197 /*
2198	 * Initialize the vnode from the inode, and check for aliases.  In all
2199	 * cases re-init ip; the underlying vnode/inode may have changed.
2200 */
2201 ulfs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
2202 ip = VTOI(vp);
2203
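	/*
	 * Short symlinks keep their target text in the inode's block
	 * pointer area, so the consistency check and fragment-size
	 * setup below apply only to other inodes.
	 */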
2204 memset(ip->i_lfs_fragsize, 0, ULFS_NDADDR * sizeof(*ip->i_lfs_fragsize));
2205 if (vp->v_type != VLNK || ip->i_size >= ip->i_lfs->um_maxsymlinklen) {
2206 #ifdef DEBUG
2207 for (i = (ip->i_size + lfs_sb_getbsize(fs) - 1) >> lfs_sb_getbshift(fs);
2208 i < ULFS_NDADDR; i++) {
2209 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
2210 i == 0)
2211 continue;
2212 if (ip->i_ffs1_db[i] != 0) {
2213 lfs_dump_dinode(ip->i_din.ffs1_din);
2214 panic("inconsistent inode (direct)");
2215 }
2216 }
2217 for ( ; i < ULFS_NDADDR + ULFS_NIADDR; i++) {
2218 if (ip->i_ffs1_ib[i - ULFS_NDADDR] != 0) {
2219 lfs_dump_dinode(ip->i_din.ffs1_din);
2220 panic("inconsistent inode (indirect)");
2221 }
2222 }
2223 #endif /* DEBUG */
2224 for (i = 0; i < ULFS_NDADDR; i++)
2225 if (ip->i_ffs1_db[i] != 0)
2226 ip->i_lfs_fragsize[i] = lfs_blksize(fs, ip, i);
2227 }
2228
2229 #ifdef DIAGNOSTIC
2230 if (vp->v_type == VNON) {
2231 # ifdef DEBUG
2232 lfs_dump_dinode(ip->i_din.ffs1_din);
2233 # endif
2234 panic("lfs_vinit: ino %llu is type VNON! (ifmt=%o)\n",
2235 (unsigned long long)ip->i_number,
2236 (ip->i_mode & LFS_IFMT) >> 12);
2237 }
2238 #endif /* DIAGNOSTIC */
2239
2240 /*
2241 * Finish inode initialization now that aliasing has been resolved.
2242 */
2243
2244 ip->i_devvp = ump->um_devvp;
2245 vref(ip->i_devvp);
2246 #if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
2247 ulfsquota_init(ip);
2248 #endif
2249 genfs_node_init(vp, &lfs_genfsops);
2250 uvm_vnp_setsize(vp, ip->i_size);
2251
2252 /* Initialize hiblk from file size */
2253 ip->i_lfs_hiblk = lfs_lblkno(ip->i_lfs, ip->i_size + lfs_sb_getbsize(ip->i_lfs) - 1) - 1;
2254
2255 *vpp = vp;
2256 }
2257
2258 /*
2259 * Resize the filesystem to contain the specified number of segments.
2260 */
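/*
 * A hedged caller sketch (resizing is normally requested from
 * userland, presumably through the LFCNRESIZE fcntl; error
 * handling omitted):
 *
 *	if ((error = lfs_resize_fs(fs, newnsegs)) != 0)
 *		return error;
 */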
2261 int
2262 lfs_resize_fs(struct lfs *fs, int newnsegs)
2263 {
2264 SEGUSE *sup;
2265 struct buf *bp, *obp;
2266 daddr_t olast, nlast, ilast, noff, start, end;
2267 struct vnode *ivp;
2268 struct inode *ip;
2269 int error, badnews, inc, oldnsegs;
2270 int sbbytes, csbbytes, gain, cgain;
2271 int i;
2272
2273 /* Only support v2 and up */
2274 if (fs->lfs_version < 2)
2275 return EOPNOTSUPP;
2276
2277 /* If we're doing nothing, do it fast */
2278 oldnsegs = lfs_sb_getnseg(fs);
2279 if (newnsegs == oldnsegs)
2280 return 0;
2281
2282 /* We always have to have two superblocks */
2283 if (newnsegs <= lfs_dtosn(fs, lfs_sb_getsboff(fs, 1)))
2284		/* XXX this error code is rather nonsensical */
2285 return EFBIG;
2286
2287 ivp = fs->lfs_ivnode;
2288 ip = VTOI(ivp);
2289 error = 0;
2290
2291 /* Take the segment lock so no one else calls lfs_newseg() */
2292 lfs_seglock(fs, SEGM_PROT);
2293
2294 /*
2295 * Make sure the segments we're going to be losing, if any,
2296 * are in fact empty. We hold the seglock, so their status
2297 * cannot change underneath us. Count the superblocks we lose,
2298 * while we're at it.
2299 */
2300 sbbytes = csbbytes = 0;
2301 cgain = 0;
2302 for (i = newnsegs; i < oldnsegs; i++) {
2303 LFS_SEGENTRY(sup, fs, i, bp);
2304 badnews = sup->su_nbytes || !(sup->su_flags & SEGUSE_INVAL);
2305 if (sup->su_flags & SEGUSE_SUPERBLOCK)
2306 sbbytes += LFS_SBPAD;
2307 if (!(sup->su_flags & SEGUSE_DIRTY)) {
2308 ++cgain;
2309 if (sup->su_flags & SEGUSE_SUPERBLOCK)
2310 csbbytes += LFS_SBPAD;
2311 }
2312 brelse(bp, 0);
2313 if (badnews) {
2314 error = EBUSY;
2315 goto out;
2316 }
2317 }
2318
2319 /* Note old and new segment table endpoints, and old ifile size */
2320 olast = lfs_sb_getcleansz(fs) + lfs_sb_getsegtabsz(fs);
2321 nlast = howmany(newnsegs, lfs_sb_getsepb(fs)) + lfs_sb_getcleansz(fs);
2322 ilast = ivp->v_size >> lfs_sb_getbshift(fs);
2323 noff = nlast - olast;
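	/* noff is the change, in Ifile blocks, in the segment table size. */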
2324
2325 /*
2326 * Make sure no one can use the Ifile while we change it around.
2327	 * Even after taking the iflock we need to make sure no one is
2328	 * still holding Ifile buffers, so we read each one to drain them.
2329 * (XXX this could be done better.)
2330 */
2331 rw_enter(&fs->lfs_iflock, RW_WRITER);
2332 for (i = 0; i < ilast; i++) {
2333 /* XXX what to do if bread fails? */
2334 bread(ivp, i, lfs_sb_getbsize(fs), 0, &bp);
2335 brelse(bp, 0);
2336 }
2337
2338 /* Allocate new Ifile blocks */
2339 for (i = ilast; i < ilast + noff; i++) {
2340 if (lfs_balloc(ivp, i * lfs_sb_getbsize(fs), lfs_sb_getbsize(fs), NOCRED, 0,
2341 &bp) != 0)
2342 panic("balloc extending ifile");
2343 memset(bp->b_data, 0, lfs_sb_getbsize(fs));
2344 VOP_BWRITE(bp->b_vp, bp);
2345 }
2346
2347 /* Register new ifile size */
2348 ip->i_size += noff * lfs_sb_getbsize(fs);
2349 ip->i_ffs1_size = ip->i_size;
2350 uvm_vnp_setsize(ivp, ip->i_size);
2351
2352 /* Copy the inode table to its new position */
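	/*
	 * The old and new block ranges may overlap, so choose a copy
	 * direction that reads each source block before it can be
	 * overwritten: ascending when shrinking, descending when growing.
	 */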
2353 if (noff != 0) {
2354 if (noff < 0) {
2355 start = nlast;
2356 end = ilast + noff;
2357 inc = 1;
2358 } else {
2359 start = ilast + noff - 1;
2360 end = nlast - 1;
2361 inc = -1;
2362 }
2363 for (i = start; i != end; i += inc) {
2364 if (bread(ivp, i, lfs_sb_getbsize(fs),
2365 B_MODIFY, &bp) != 0)
2366 panic("resize: bread dst blk failed");
2367 if (bread(ivp, i - noff, lfs_sb_getbsize(fs),
2368 0, &obp))
2369 panic("resize: bread src blk failed");
2370 memcpy(bp->b_data, obp->b_data, lfs_sb_getbsize(fs));
2371 VOP_BWRITE(bp->b_vp, bp);
2372 brelse(obp, 0);
2373 }
2374 }
2375
2376 /* If we are expanding, write the new empty SEGUSE entries */
2377 if (newnsegs > oldnsegs) {
2378 for (i = oldnsegs; i < newnsegs; i++) {
2379 if ((error = bread(ivp, i / lfs_sb_getsepb(fs) +
2380 lfs_sb_getcleansz(fs), lfs_sb_getbsize(fs),
2381 B_MODIFY, &bp)) != 0)
2382 panic("lfs: ifile read: %d", error);
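			/* Zero every new SEGUSE entry in this Ifile block. */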
2383 while ((i + 1) % lfs_sb_getsepb(fs) && i < newnsegs) {
2384 sup = &((SEGUSE *)bp->b_data)[i % lfs_sb_getsepb(fs)];
2385 memset(sup, 0, sizeof(*sup));
2386 i++;
2387 }
2388 VOP_BWRITE(bp->b_vp, bp);
2389 }
2390 }
2391
2392 /* Zero out unused superblock offsets */
2393 for (i = 2; i < LFS_MAXNUMSB; i++)
2394 if (lfs_dtosn(fs, lfs_sb_getsboff(fs, i)) >= newnsegs)
2395 lfs_sb_setsboff(fs, i, 0x0);
2396
2397 /*
2398 * Correct superblock entries that depend on fs size.
2399	 * These quantities are computed as follows:
2400 *
2401 * size = lfs_segtod(fs, nseg)
2402	 * dsize = lfs_segtod(fs, nseg - minfreeseg) - lfs_btofsb(fs, #super * LFS_SBPAD)
2403	 * bfree = dsize - lfs_btofsb(fs, bsize * nseg / 2) - blocks_actually_used
2404	 * avail = lfs_segtod(fs, nclean) - lfs_btofsb(fs, #clean_super * LFS_SBPAD)
2405 * + (lfs_segtod(fs, 1) - (offset - curseg))
2406 * - lfs_segtod(fs, minfreeseg - (minfreeseg / 2))
2407 *
2408 * XXX - we should probably adjust minfreeseg as well.
2409 */
2410 gain = (newnsegs - oldnsegs);
2411 lfs_sb_setnseg(fs, newnsegs);
2412 lfs_sb_setsegtabsz(fs, nlast - lfs_sb_getcleansz(fs));
2413 lfs_sb_addsize(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)));
2414 lfs_sb_adddsize(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)) - lfs_btofsb(fs, sbbytes));
2415 lfs_sb_addbfree(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)) - lfs_btofsb(fs, sbbytes)
2416 - gain * lfs_btofsb(fs, lfs_sb_getbsize(fs) / 2));
2417 if (gain > 0) {
2418 lfs_sb_addnclean(fs, gain);
2419 lfs_sb_addavail(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)));
2420 } else {
2421 lfs_sb_subnclean(fs, cgain);
2422 lfs_sb_subavail(fs, cgain * lfs_btofsb(fs, lfs_sb_getssize(fs)) -
2423 lfs_btofsb(fs, csbbytes));
2424 }
2425
2426 /* Resize segment flag cache */
2427 fs->lfs_suflags[0] = realloc(fs->lfs_suflags[0],
2428 lfs_sb_getnseg(fs) * sizeof(u_int32_t), M_SEGMENT, M_WAITOK);
2429 fs->lfs_suflags[1] = realloc(fs->lfs_suflags[1],
2430 lfs_sb_getnseg(fs) * sizeof(u_int32_t), M_SEGMENT, M_WAITOK);
2431 for (i = oldnsegs; i < newnsegs; i++)
2432 fs->lfs_suflags[0][i] = fs->lfs_suflags[1][i] = 0x0;
2433
2434 /* Truncate Ifile if necessary */
2435 if (noff < 0)
2436 lfs_truncate(ivp, ivp->v_size + (noff << lfs_sb_getbshift(fs)), 0,
2437 NOCRED);
2438
2439 /* Update cleaner info so the cleaner can die */
2440 /* XXX what to do if bread fails? */
2441 bread(ivp, 0, lfs_sb_getbsize(fs), B_MODIFY, &bp);
2442 ((CLEANERINFO *)bp->b_data)->clean = lfs_sb_getnclean(fs);
2443 ((CLEANERINFO *)bp->b_data)->dirty = lfs_sb_getnseg(fs) - lfs_sb_getnclean(fs);
2444 VOP_BWRITE(bp->b_vp, bp);
2445
2446 /* Let Ifile accesses proceed */
2447 rw_exit(&fs->lfs_iflock);
2448
2449 out:
2450 lfs_segunlock(fs);
2451 return error;
2452 }
2453
2454 /*
2455 * Extended attribute dispatch
2456 */
2457 int
2458 lfs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
2459 int attrnamespace, const char *attrname)
2460 {
2461 #ifdef LFS_EXTATTR
2462 struct ulfsmount *ump;
2463
2464 ump = VFSTOULFS(mp);
2465 if (ump->um_fstype == ULFS1) {
2466 return ulfs_extattrctl(mp, cmd, vp, attrnamespace, attrname);
2467 }
2468 #endif
2469 return vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname);
2470 }
2471