1 /* $NetBSD: lfs_vfsops.c,v 1.339 2015/08/12 18:27:01 dholland Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007
5 * The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Konrad E. Schroder <perseant@hhhh.org>.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32 /*-
33 * Copyright (c) 1989, 1991, 1993, 1994
34 * The Regents of the University of California. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)lfs_vfsops.c 8.20 (Berkeley) 6/10/95
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.339 2015/08/12 18:27:01 dholland Exp $");
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_lfs.h"
68 #include "opt_quota.h"
69 #endif
70
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/namei.h>
74 #include <sys/proc.h>
75 #include <sys/kernel.h>
76 #include <sys/vnode.h>
77 #include <sys/mount.h>
78 #include <sys/kthread.h>
79 #include <sys/buf.h>
80 #include <sys/device.h>
81 #include <sys/mbuf.h>
82 #include <sys/file.h>
83 #include <sys/disklabel.h>
84 #include <sys/ioctl.h>
85 #include <sys/errno.h>
86 #include <sys/malloc.h>
87 #include <sys/pool.h>
88 #include <sys/socket.h>
89 #include <sys/syslog.h>
90 #include <uvm/uvm_extern.h>
91 #include <sys/sysctl.h>
92 #include <sys/conf.h>
93 #include <sys/kauth.h>
94 #include <sys/module.h>
95 #include <sys/syscallvar.h>
96 #include <sys/syscall.h>
97 #include <sys/syscallargs.h>
98
99 #include <miscfs/specfs/specdev.h>
100
101 #include <ufs/lfs/ulfs_quotacommon.h>
102 #include <ufs/lfs/ulfs_inode.h>
103 #include <ufs/lfs/ulfsmount.h>
104 #include <ufs/lfs/ulfs_bswap.h>
105 #include <ufs/lfs/ulfs_extern.h>
106
107 #include <uvm/uvm.h>
108 #include <uvm/uvm_stat.h>
109 #include <uvm/uvm_pager.h>
110 #include <uvm/uvm_pdaemon.h>
111
112 #include <ufs/lfs/lfs.h>
113 #include <ufs/lfs/lfs_accessors.h>
114 #include <ufs/lfs/lfs_kernel.h>
115 #include <ufs/lfs/lfs_extern.h>
116
117 #include <miscfs/genfs/genfs.h>
118 #include <miscfs/genfs/genfs_node.h>
119
120 MODULE(MODULE_CLASS_VFS, lfs, NULL);
121
122 static int lfs_gop_write(struct vnode *, struct vm_page **, int, int);
123 static int lfs_mountfs(struct vnode *, struct mount *, struct lwp *);
124
125 static struct sysctllog *lfs_sysctl_log;
126
127 extern const struct vnodeopv_desc lfs_vnodeop_opv_desc;
128 extern const struct vnodeopv_desc lfs_specop_opv_desc;
129 extern const struct vnodeopv_desc lfs_fifoop_opv_desc;
130
131 pid_t lfs_writer_daemon = 0;
132 lwpid_t lfs_writer_lid = 0;
133 int lfs_do_flush = 0;
134 #ifdef LFS_KERNEL_RFW
135 int lfs_do_rfw = 0;
136 #endif
137
138 const struct vnodeopv_desc * const lfs_vnodeopv_descs[] = {
139 &lfs_vnodeop_opv_desc,
140 &lfs_specop_opv_desc,
141 &lfs_fifoop_opv_desc,
142 NULL,
143 };
144
145 struct vfsops lfs_vfsops = {
146 .vfs_name = MOUNT_LFS,
147 .vfs_min_mount_data = sizeof (struct ulfs_args),
148 .vfs_mount = lfs_mount,
149 .vfs_start = ulfs_start,
150 .vfs_unmount = lfs_unmount,
151 .vfs_root = ulfs_root,
152 .vfs_quotactl = ulfs_quotactl,
153 .vfs_statvfs = lfs_statvfs,
154 .vfs_sync = lfs_sync,
155 .vfs_vget = lfs_vget,
156 .vfs_loadvnode = lfs_loadvnode,
157 .vfs_newvnode = lfs_newvnode,
158 .vfs_fhtovp = lfs_fhtovp,
159 .vfs_vptofh = lfs_vptofh,
160 .vfs_init = lfs_init,
161 .vfs_reinit = lfs_reinit,
162 .vfs_done = lfs_done,
163 .vfs_mountroot = lfs_mountroot,
164 .vfs_snapshot = (void *)eopnotsupp,
165 .vfs_extattrctl = lfs_extattrctl,
166 .vfs_suspendctl = (void *)eopnotsupp,
167 .vfs_renamelock_enter = genfs_renamelock_enter,
168 .vfs_renamelock_exit = genfs_renamelock_exit,
169 .vfs_fsync = (void *)eopnotsupp,
170 .vfs_opv_descs = lfs_vnodeopv_descs
171 };
172
173 const struct genfs_ops lfs_genfsops = {
174 .gop_size = lfs_gop_size,
175 .gop_alloc = ulfs_gop_alloc,
176 .gop_write = lfs_gop_write,
177 .gop_markupdate = ulfs_gop_markupdate,
178 };
179
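/*
 * Pairs a sysctl node's short name with its long description.
 */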
180 struct shortlong {
181 const char *sname;
182 const char *lname;
183 };
184
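/*
 * Handler for the "dostats" sysctl.  Reads/writes lfs_dostats via
 * sysctl_lookup(); as a side effect, a write that clears lfs_dostats
 * also resets the accumulated counters in lfs_stats.
 */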
185 static int
186 sysctl_lfs_dostats(SYSCTLFN_ARGS)
187 {
188 extern struct lfs_stats lfs_stats;
189 extern int lfs_dostats;
190 int error;
191
192 error = sysctl_lookup(SYSCTLFN_CALL(rnode));
193 if (error || newp == NULL)
194 return (error);
195
196 if (lfs_dostats == 0)
197 memset(&lfs_stats, 0, sizeof(lfs_stats));
198
199 return (0);
200 }
201
202 static void
203 lfs_sysctl_setup(struct sysctllog **clog)
204 {
205 int i;
206 extern int lfs_writeindir, lfs_dostats, lfs_clean_vnhead,
207 lfs_fs_pagetrip, lfs_ignore_lazy_sync;
208 #ifdef DEBUG
209 extern int lfs_debug_log_subsys[DLOG_MAX];
210 struct shortlong dlog_names[DLOG_MAX] = { /* Must match lfs.h! */
211 { "rollforward", "Debug roll-forward code" },
212 { "alloc", "Debug inode allocation and free list" },
213 { "avail", "Debug space-available-now accounting" },
214 { "flush", "Debug flush triggers" },
215 { "lockedlist", "Debug locked list accounting" },
216 { "vnode_verbose", "Verbose per-vnode-written debugging" },
217 { "vnode", "Debug vnode use during segment write" },
218 { "segment", "Debug segment writing" },
219 { "seguse", "Debug segment used-bytes accounting" },
220 { "cleaner", "Debug cleaning routines" },
221 { "mount", "Debug mount/unmount routines" },
222 { "pagecache", "Debug UBC interactions" },
223 { "dirop", "Debug directory-operation accounting" },
224 { "malloc", "Debug private malloc accounting" },
225 };
226 #endif /* DEBUG */
227 struct shortlong stat_names[] = { /* Must match lfs.h! */
228 { "segsused", "Number of new segments allocated" },
229 { "psegwrites", "Number of partial-segment writes" },
230 { "psyncwrites", "Number of synchronous partial-segment"
231 " writes" },
232 { "pcleanwrites", "Number of partial-segment writes by the"
233 " cleaner" },
234 { "blocktot", "Number of blocks written" },
235 { "cleanblocks", "Number of blocks written by the cleaner" },
236 { "ncheckpoints", "Number of checkpoints made" },
237 { "nwrites", "Number of whole writes" },
238 { "nsync_writes", "Number of synchronous writes" },
239 { "wait_exceeded", "Number of times writer waited for"
240 " cleaner" },
241 { "write_exceeded", "Number of times writer invoked flush" },
242 { "flush_invoked", "Number of times flush was invoked" },
243 { "vflush_invoked", "Number of times vflush was called" },
244 { "clean_inlocked", "Number of vnodes skipped for being dead" },
245 { "clean_vnlocked", "Number of vnodes skipped for vget failure" },
246 { "segs_reclaimed", "Number of segments reclaimed" },
247 };
248
249 sysctl_createv(clog, 0, NULL, NULL,
250 CTLFLAG_PERMANENT,
251 CTLTYPE_NODE, "lfs",
252 SYSCTL_DESCR("Log-structured file system"),
253 NULL, 0, NULL, 0,
254 CTL_VFS, 5, CTL_EOL);
255 /*
256 * XXX the "5" above could be dynamic, thereby eliminating one
257 * more instance of the "number to vfs" mapping problem, but
258 * "5" is the order as taken from sys/mount.h
259 */
260
261 sysctl_createv(clog, 0, NULL, NULL,
262 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
263 CTLTYPE_INT, "flushindir", NULL,
264 NULL, 0, &lfs_writeindir, 0,
265 CTL_VFS, 5, LFS_WRITEINDIR, CTL_EOL);
266 sysctl_createv(clog, 0, NULL, NULL,
267 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
268 CTLTYPE_INT, "clean_vnhead", NULL,
269 NULL, 0, &lfs_clean_vnhead, 0,
270 CTL_VFS, 5, LFS_CLEAN_VNHEAD, CTL_EOL);
271 sysctl_createv(clog, 0, NULL, NULL,
272 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
273 CTLTYPE_INT, "dostats",
274 SYSCTL_DESCR("Maintain statistics on LFS operations"),
275 sysctl_lfs_dostats, 0, &lfs_dostats, 0,
276 CTL_VFS, 5, LFS_DOSTATS, CTL_EOL);
277 sysctl_createv(clog, 0, NULL, NULL,
278 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
279 CTLTYPE_INT, "pagetrip",
280 SYSCTL_DESCR("How many dirty pages in fs triggers"
281 " a flush"),
282 NULL, 0, &lfs_fs_pagetrip, 0,
283 CTL_VFS, 5, LFS_FS_PAGETRIP, CTL_EOL);
284 sysctl_createv(clog, 0, NULL, NULL,
285 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
286 CTLTYPE_INT, "ignore_lazy_sync",
287 SYSCTL_DESCR("Lazy Sync is ignored entirely"),
288 NULL, 0, &lfs_ignore_lazy_sync, 0,
289 CTL_VFS, 5, LFS_IGNORE_LAZY_SYNC, CTL_EOL);
290 #ifdef LFS_KERNEL_RFW
291 sysctl_createv(clog, 0, NULL, NULL,
292 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
293 CTLTYPE_INT, "rfw",
294 SYSCTL_DESCR("Use in-kernel roll-forward on mount"),
295 NULL, 0, &lfs_do_rfw, 0,
296 CTL_VFS, 5, LFS_DO_RFW, CTL_EOL);
297 #endif
298
299 sysctl_createv(clog, 0, NULL, NULL,
300 CTLFLAG_PERMANENT,
301 CTLTYPE_NODE, "stats",
302 SYSCTL_DESCR("LFS statistics"),
303 NULL, 0, NULL, 0,
304 CTL_VFS, 5, LFS_STATS, CTL_EOL);
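	/*
	 * Export each counter of struct lfs_stats as a read-only node,
	 * treating the structure as a flat array of u_int starting at
	 * segsused; this is why stat_names[] above must match lfs.h.
	 */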
305 for (i = 0; i < sizeof(struct lfs_stats) / sizeof(u_int); i++) {
306 sysctl_createv(clog, 0, NULL, NULL,
307 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
308 CTLTYPE_INT, stat_names[i].sname,
309 SYSCTL_DESCR(stat_names[i].lname),
310 NULL, 0, &(((u_int *)&lfs_stats.segsused)[i]),
311 0, CTL_VFS, 5, LFS_STATS, i, CTL_EOL);
312 }
313
314 #ifdef DEBUG
315 sysctl_createv(clog, 0, NULL, NULL,
316 CTLFLAG_PERMANENT,
317 CTLTYPE_NODE, "debug",
318 SYSCTL_DESCR("Debugging options"),
319 NULL, 0, NULL, 0,
320 CTL_VFS, 5, LFS_DEBUGLOG, CTL_EOL);
321 for (i = 0; i < DLOG_MAX; i++) {
322 sysctl_createv(clog, 0, NULL, NULL,
323 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
324 CTLTYPE_INT, dlog_names[i].sname,
325 SYSCTL_DESCR(dlog_names[i].lname),
326 NULL, 0, &(lfs_debug_log_subsys[i]), 0,
327 CTL_VFS, 5, LFS_DEBUGLOG, i, CTL_EOL);
328 }
329 #endif
330 }
331
332 /* old cleaner syscall interface. see VOP_FCNTL() */
333 static const struct syscall_package lfs_syscalls[] = {
334 { SYS_lfs_bmapv, 0, (sy_call_t *)sys_lfs_bmapv },
335 { SYS_lfs_markv, 0, (sy_call_t *)sys_lfs_markv },
336 { SYS___lfs_segwait50, 0, (sy_call_t *)sys___lfs_segwait50 },
337 { SYS_lfs_segclean, 0, (sy_call_t *)sys_lfs_segclean },
338 { 0, 0, NULL },
339 };
340
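/*
 * Module control.  On load, register the cleaner syscalls before
 * attaching the VFS, backing the syscalls out again if the attach
 * fails; on unload, detach the VFS first (this fails while the
 * filesystem is still mounted) before removing the syscalls and
 * tearing down the sysctl tree.
 */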
341 static int
342 lfs_modcmd(modcmd_t cmd, void *arg)
343 {
344 int error;
345
346 switch (cmd) {
347 case MODULE_CMD_INIT:
348 error = syscall_establish(NULL, lfs_syscalls);
349 if (error)
350 return error;
351 error = vfs_attach(&lfs_vfsops);
352 if (error != 0) {
353 syscall_disestablish(NULL, lfs_syscalls);
354 break;
355 }
356 lfs_sysctl_setup(&lfs_sysctl_log);
357 break;
358 case MODULE_CMD_FINI:
359 error = vfs_detach(&lfs_vfsops);
360 if (error != 0)
361 break;
362 syscall_disestablish(NULL, lfs_syscalls);
363 sysctl_teardown(&lfs_sysctl_log);
364 break;
365 default:
366 error = ENOTTY;
367 break;
368 }
369
370 return (error);
371 }
372
373 /*
374 * XXX Same structure as FFS inodes? Should we share a common pool?
375 */
376 struct pool lfs_inode_pool;
377 struct pool lfs_dinode_pool;
378 struct pool lfs_inoext_pool;
379 struct pool lfs_lbnentry_pool;
380
381 /*
382 * The writer daemon. UVM keeps track of how many dirty pages we are holding
383 * in lfs_subsys_pages; the daemon flushes the filesystem when this value
384 * crosses the (user-defined) threshold LFS_MAX_PAGES.
385 */
386 static void
387 lfs_writerd(void *arg)
388 {
389 struct mount *mp, *nmp;
390 struct lfs *fs;
391 struct vfsops *vfs = NULL;
392 int fsflags;
393 int skipc;
394 int lfsc;
395 int wrote_something = 0;
396
397 mutex_enter(&lfs_lock);
398 lfs_writer_daemon = curproc->p_pid;
399 lfs_writer_lid = curlwp->l_lid;
400 mutex_exit(&lfs_lock);
401
402 /* Take an extra reference to the LFS vfsops. */
403 vfs = vfs_getopsbyname(MOUNT_LFS);
404
405 mutex_enter(&lfs_lock);
406 for (;;) {
407 KASSERT(mutex_owned(&lfs_lock));
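		/*
		 * Sleep briefly, unless the previous pass through the
		 * mount list wrote something; in that case poll again
		 * immediately, since more work has likely accumulated.
		 */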
408 if (wrote_something == 0)
409 mtsleep(&lfs_writer_daemon, PVM, "lfswriter", hz/10 + 1,
410 &lfs_lock);
411
412 KASSERT(mutex_owned(&lfs_lock));
413 wrote_something = 0;
414
415 /*
416 * If global state wants a flush, flush everything.
417 */
418 if (lfs_do_flush || locked_queue_count > LFS_MAX_BUFS ||
419 locked_queue_bytes > LFS_MAX_BYTES ||
420 lfs_subsys_pages > LFS_MAX_PAGES) {
421
422 if (lfs_do_flush) {
423 DLOG((DLOG_FLUSH, "lfs_writerd: lfs_do_flush\n"));
424 }
425 if (locked_queue_count > LFS_MAX_BUFS) {
426 DLOG((DLOG_FLUSH, "lfs_writerd: lqc = %d, max %d\n",
427 locked_queue_count, LFS_MAX_BUFS));
428 }
429 if (locked_queue_bytes > LFS_MAX_BYTES) {
430 DLOG((DLOG_FLUSH, "lfs_writerd: lqb = %ld, max %ld\n",
431 locked_queue_bytes, LFS_MAX_BYTES));
432 }
433 if (lfs_subsys_pages > LFS_MAX_PAGES) {
434 DLOG((DLOG_FLUSH, "lfs_writerd: lssp = %d, max %d\n",
435 lfs_subsys_pages, LFS_MAX_PAGES));
436 }
437
438 lfs_flush(NULL, SEGM_WRITERD, 0);
439 lfs_do_flush = 0;
440 KASSERT(mutex_owned(&lfs_lock));
441 continue;
442 }
443 KASSERT(mutex_owned(&lfs_lock));
444 mutex_exit(&lfs_lock);
445
446 /*
447 * Look through the list of LFSs to see if any of them
448 * have requested pageouts.
449 */
450 mutex_enter(&mountlist_lock);
451 lfsc = 0;
452 skipc = 0;
453 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
454 if (vfs_busy(mp, &nmp)) {
455 ++skipc;
456 continue;
457 }
458 KASSERT(!mutex_owned(&lfs_lock));
459 if (strncmp(mp->mnt_stat.f_fstypename, MOUNT_LFS,
460 sizeof(mp->mnt_stat.f_fstypename)) == 0) {
461 ++lfsc;
462 fs = VFSTOULFS(mp)->um_lfs;
463 daddr_t ooffset = 0;
464 fsflags = SEGM_SINGLE;
465
466 mutex_enter(&lfs_lock);
467 ooffset = lfs_sb_getoffset(fs);
468
469 if (lfs_sb_getnextseg(fs) < lfs_sb_getcurseg(fs) && fs->lfs_nowrap) {
470 /* Don't try to write if we're suspended */
471 mutex_exit(&lfs_lock);
472 vfs_unbusy(mp, false, &nmp);
473 continue;
474 }
475 if (LFS_STARVED_FOR_SEGS(fs)) {
476 mutex_exit(&lfs_lock);
477
478 DLOG((DLOG_FLUSH, "lfs_writerd: need cleaning before writing possible\n"));
479 lfs_wakeup_cleaner(fs);
480 vfs_unbusy(mp, false, &nmp);
481 continue;
482 }
483
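				/*
				 * Decide how much this filesystem needs
				 * written: a full checkpoint if dirops
				 * are piling up, an ordinary flush if
				 * pdflush was set, or just the paging
				 * chain if dirty pages are queued on it.
				 */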
484 if ((fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
485 lfs_dirvcount > LFS_MAX_DIROP) &&
486 fs->lfs_dirops == 0) {
487 fsflags &= ~SEGM_SINGLE;
488 fsflags |= SEGM_CKP;
489 DLOG((DLOG_FLUSH, "lfs_writerd: checkpoint\n"));
490 lfs_flush_fs(fs, fsflags);
491 } else if (fs->lfs_pdflush) {
492 DLOG((DLOG_FLUSH, "lfs_writerd: pdflush set\n"));
493 lfs_flush_fs(fs, fsflags);
494 } else if (!TAILQ_EMPTY(&fs->lfs_pchainhd)) {
495 DLOG((DLOG_FLUSH, "lfs_writerd: pchain non-empty\n"));
496 mutex_exit(&lfs_lock);
497 lfs_writer_enter(fs, "wrdirop");
498 lfs_flush_pchain(fs);
499 lfs_writer_leave(fs);
500 mutex_enter(&lfs_lock);
501 }
502 if (lfs_sb_getoffset(fs) != ooffset)
503 ++wrote_something;
504 mutex_exit(&lfs_lock);
505 }
506 KASSERT(!mutex_owned(&lfs_lock));
507 vfs_unbusy(mp, false, &nmp);
508 }
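		/*
		 * If we saw no LFS mounts at all (and skipped none as
		 * busy), there is nothing left to write: clear the
		 * daemon's identity and let the thread exit.
		 */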
509 if (lfsc + skipc == 0) {
510 mutex_enter(&lfs_lock);
511 lfs_writer_daemon = 0;
512 lfs_writer_lid = 0;
513 mutex_exit(&lfs_lock);
514 mutex_exit(&mountlist_lock);
515 break;
516 }
517 mutex_exit(&mountlist_lock);
518
519 mutex_enter(&lfs_lock);
520 }
521 KASSERT(!mutex_owned(&lfs_lock));
522 KASSERT(!mutex_owned(&mountlist_lock));
523
524 /* Give up our extra reference so the module can be unloaded. */
525 mutex_enter(&vfs_list_lock);
526 if (vfs != NULL)
527 vfs->vfs_refcount--;
528 mutex_exit(&vfs_list_lock);
529
530 /* Done! */
531 kthread_exit(0);
532 }
533
534 /*
535 * Initialize the filesystem, most work done by ulfs_init.
536 */
537 void
538 lfs_init(void)
539 {
540
541 malloc_type_attach(M_SEGMENT);
542 pool_init(&lfs_inode_pool, sizeof(struct inode), 0, 0, 0,
543 "lfsinopl", &pool_allocator_nointr, IPL_NONE);
544 pool_init(&lfs_dinode_pool, sizeof(struct ulfs1_dinode), 0, 0, 0,
545 "lfsdinopl", &pool_allocator_nointr, IPL_NONE);
546 pool_init(&lfs_inoext_pool, sizeof(struct lfs_inode_ext), 8, 0, 0,
547 "lfsinoextpl", &pool_allocator_nointr, IPL_NONE);
548 pool_init(&lfs_lbnentry_pool, sizeof(struct lbnentry), 0, 0, 0,
549 "lfslbnpool", &pool_allocator_nointr, IPL_NONE);
550 ulfs_init();
551
552 #ifdef DEBUG
553 memset(lfs_log, 0, sizeof(lfs_log));
554 #endif
555 mutex_init(&lfs_lock, MUTEX_DEFAULT, IPL_NONE);
556 cv_init(&locked_queue_cv, "lfsbuf");
557 cv_init(&lfs_writing_cv, "lfsflush");
558 }
559
560 void
561 lfs_reinit(void)
562 {
563 ulfs_reinit();
564 }
565
566 void
567 lfs_done(void)
568 {
569 ulfs_done();
570 mutex_destroy(&lfs_lock);
571 cv_destroy(&locked_queue_cv);
572 cv_destroy(&lfs_writing_cv);
573 pool_destroy(&lfs_inode_pool);
574 pool_destroy(&lfs_dinode_pool);
575 pool_destroy(&lfs_inoext_pool);
576 pool_destroy(&lfs_lbnentry_pool);
577 malloc_type_detach(M_SEGMENT);
578 }
579
580 /*
581 * Called by main() when ulfs is going to be mounted as root.
582 */
583 int
584 lfs_mountroot(void)
585 {
586 extern struct vnode *rootvp;
587 struct lfs *fs = NULL; /* LFS */
588 struct mount *mp;
589 struct lwp *l = curlwp;
590 struct ulfsmount *ump;
591 int error;
592
593 if (device_class(root_device) != DV_DISK)
594 return (ENODEV);
595
596 if (rootdev == NODEV)
597 return (ENODEV);
598 if ((error = vfs_rootmountalloc(MOUNT_LFS, "root_device", &mp))) {
599 vrele(rootvp);
600 return (error);
601 }
602 if ((error = lfs_mountfs(rootvp, mp, l))) {
603 vfs_unbusy(mp, false, NULL);
604 vfs_destroy(mp);
605 return (error);
606 }
607 mountlist_append(mp);
608 ump = VFSTOULFS(mp);
609 fs = ump->um_lfs;
610 lfs_sb_setfsmnt(fs, mp->mnt_stat.f_mntonname);
611 (void)lfs_statvfs(mp, &mp->mnt_stat);
612 vfs_unbusy(mp, false, NULL);
613 setrootfstime((time_t)lfs_sb_gettstamp(VFSTOULFS(mp)->um_lfs));
614 return (0);
615 }
616
617 /*
618 * VFS Operations.
619 *
620 * mount system call
621 */
622 int
623 lfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
624 {
625 struct lwp *l = curlwp;
626 struct vnode *devvp;
627 struct ulfs_args *args = data;
628 struct ulfsmount *ump = NULL;
629 struct lfs *fs = NULL; /* LFS */
630 int error = 0, update;
631 mode_t accessmode;
632
633 if (args == NULL)
634 return EINVAL;
635 if (*data_len < sizeof *args)
636 return EINVAL;
637
638 if (mp->mnt_flag & MNT_GETARGS) {
639 ump = VFSTOULFS(mp);
640 if (ump == NULL)
641 return EIO;
642 args->fspec = NULL;
643 *data_len = sizeof *args;
644 return 0;
645 }
646
647 update = mp->mnt_flag & MNT_UPDATE;
648
649 /* Check arguments */
650 if (args->fspec != NULL) {
651 /*
652 * Look up the name and verify that it's sane.
653 */
654 error = namei_simple_user(args->fspec,
655 NSM_FOLLOW_NOEMULROOT, &devvp);
656 if (error != 0)
657 return (error);
658
659 if (!update) {
660 /*
661 * Be sure this is a valid block device
662 */
663 if (devvp->v_type != VBLK)
664 error = ENOTBLK;
665 else if (bdevsw_lookup(devvp->v_rdev) == NULL)
666 error = ENXIO;
667 } else {
668 /*
669 * Be sure we're still naming the same device
670 * used for our initial mount
671 */
672 ump = VFSTOULFS(mp);
673 if (devvp != ump->um_devvp) {
674 if (devvp->v_rdev != ump->um_devvp->v_rdev)
675 error = EINVAL;
676 else {
677 vrele(devvp);
678 devvp = ump->um_devvp;
679 vref(devvp);
680 }
681 }
682 }
683 } else {
684 if (!update) {
685 /* New mounts must have a filename for the device */
686 return (EINVAL);
687 } else {
688 /* Use the extant mount */
689 ump = VFSTOULFS(mp);
690 devvp = ump->um_devvp;
691 vref(devvp);
692 }
693 }
694
695
696 /*
697 * If mount by non-root, then verify that user has necessary
698 * permissions on the device.
699 */
700 if (error == 0) {
701 accessmode = VREAD;
702 if (update ?
703 (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
704 (mp->mnt_flag & MNT_RDONLY) == 0)
705 accessmode |= VWRITE;
706 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
707 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
708 KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
709 KAUTH_ARG(accessmode));
710 VOP_UNLOCK(devvp);
711 }
712
713 if (error) {
714 vrele(devvp);
715 return (error);
716 }
717
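	/*
	 * For a new mount, open the device and hand the real work to
	 * lfs_mountfs().  For an update, handle read-only/read-write
	 * transitions, including superblock and quota state.
	 */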
718 if (!update) {
719 int flags;
720
721 if (mp->mnt_flag & MNT_RDONLY)
722 flags = FREAD;
723 else
724 flags = FREAD|FWRITE;
725 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
726 error = VOP_OPEN(devvp, flags, FSCRED);
727 VOP_UNLOCK(devvp);
728 if (error)
729 goto fail;
730 error = lfs_mountfs(devvp, mp, l); /* LFS */
731 if (error) {
732 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
733 (void)VOP_CLOSE(devvp, flags, NOCRED);
734 VOP_UNLOCK(devvp);
735 goto fail;
736 }
737
738 ump = VFSTOULFS(mp);
739 fs = ump->um_lfs;
740 } else {
741 /*
742 * Update the mount.
743 */
744
745 /*
746 * The initial mount got a reference on this
747 * device, so drop the one obtained via
748 * namei(), above.
749 */
750 vrele(devvp);
751
752 ump = VFSTOULFS(mp);
753 fs = ump->um_lfs;
754
755 if (fs->lfs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
756 /*
757 * Changing from read/write to read-only.
758 * XXX: shouldn't we sync here? or does vfs do that?
759 */
760 #ifdef LFS_QUOTA2
761 /* XXX: quotas should remain on when readonly */
762 if (fs->lfs_use_quota2) {
763 error = lfsquota2_umount(mp, 0);
764 if (error) {
765 return error;
766 }
767 }
768 #endif
769 }
770
771 if (fs->lfs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
772 /*
773 * Changing from read-only to read/write.
774 * Note in the superblocks that we're writing.
775 */
776
777 /* XXX: quotas should have been on even if readonly */
778 if (fs->lfs_use_quota2) {
779 #ifdef LFS_QUOTA2
780 error = lfs_quota2_mount(mp);
781 #else
782 uprintf("%s: no kernel support for this "
783 "filesystem's quotas\n",
784 mp->mnt_stat.f_mntonname);
785 if (mp->mnt_flag & MNT_FORCE) {
786 uprintf("%s: mounting anyway; "
787 "fsck afterwards\n",
788 mp->mnt_stat.f_mntonname);
789 } else {
790 error = EINVAL;
791 }
792 #endif
793 if (error) {
794 return error;
795 }
796 }
797
798 fs->lfs_ronly = 0;
799 if (lfs_sb_getpflags(fs) & LFS_PF_CLEAN) {
800 lfs_sb_setpflags(fs, lfs_sb_getpflags(fs) & ~LFS_PF_CLEAN);
801 lfs_writesuper(fs, lfs_sb_getsboff(fs, 0));
802 lfs_writesuper(fs, lfs_sb_getsboff(fs, 1));
803 }
804 }
805 if (args->fspec == NULL)
806 return EINVAL;
807 }
808
809 error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
810 UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
811 if (error == 0)
812 lfs_sb_setfsmnt(fs, mp->mnt_stat.f_mntonname);
813 return error;
814
815 fail:
816 vrele(devvp);
817 return (error);
818 }
819
820
821 /*
822 * Common code for mount and mountroot
823 * LFS specific
824 */
825 int
826 lfs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
827 {
828 struct dlfs *tdfs, *dfs, *adfs;
829 struct lfs *fs;
830 struct ulfsmount *ump;
831 struct vnode *vp;
832 struct buf *bp, *abp;
833 dev_t dev;
834 int error, i, ronly, fsbsize;
835 kauth_cred_t cred;
836 CLEANERINFO *cip;
837 SEGUSE *sup;
838 daddr_t sb_addr;
839
840 cred = l ? l->l_cred : NOCRED;
841
842 /*
843 * Flush out any old buffers remaining from a previous use.
844 */
845 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
846 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
847 VOP_UNLOCK(devvp);
848 if (error)
849 return (error);
850
851 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
852
853 /* Don't free random space on error. */
854 bp = NULL;
855 abp = NULL;
856 ump = NULL;
857
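	/*
	 * Probe for the primary superblock, starting just past the
	 * disklabel.  On a version 2 filesystem with large fragments
	 * the "real" primary superblock may live elsewhere, in which
	 * case the loop below follows dlfs_sboffs[0] and retries.
	 */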
858 sb_addr = LFS_LABELPAD / DEV_BSIZE;
859 while (1) {
860 /* Read in the superblock. */
861 error = bread(devvp, sb_addr, LFS_SBPAD, 0, &bp);
862 if (error)
863 goto out;
864 dfs = (struct dlfs *)bp->b_data;
865
866 /* Check the basics. */
867 if (dfs->dlfs_magic != LFS_MAGIC || dfs->dlfs_bsize > MAXBSIZE ||
868 dfs->dlfs_version > LFS_VERSION ||
869 dfs->dlfs_bsize < sizeof(struct dlfs)) {
870 DLOG((DLOG_MOUNT, "lfs_mountfs: primary superblock sanity failed\n"));
871 error = EINVAL; /* XXX needs translation */
872 goto out;
873 }
874 if (dfs->dlfs_inodefmt > LFS_MAXINODEFMT) {
875 DLOG((DLOG_MOUNT, "lfs_mountfs: unknown inode format %d\n",
876 dfs->dlfs_inodefmt));
877 error = EINVAL;
878 goto out;
879 }
880
881 if (dfs->dlfs_version == 1)
882 fsbsize = DEV_BSIZE;
883 else {
884 fsbsize = 1 << dfs->dlfs_ffshift;
885 /*
886 * Could be, if the frag size is large enough, that we
887 * don't have the "real" primary superblock. If that's
888 * the case, get the real one, and try again.
889 */
890 if (sb_addr != (dfs->dlfs_sboffs[0] << (dfs->dlfs_ffshift - DEV_BSHIFT))) {
891 DLOG((DLOG_MOUNT, "lfs_mountfs: sb daddr"
892 " 0x%llx is not right, trying 0x%llx\n",
893 (long long)sb_addr,
894 (long long)(dfs->dlfs_sboffs[0] << (dfs->dlfs_ffshift - DEV_BSHIFT))));
895 sb_addr = dfs->dlfs_sboffs[0] << (dfs->dlfs_ffshift - DEV_BSHIFT);
896 brelse(bp, 0);
897 continue;
898 }
899 }
900 break;
901 }
902
903 /*
904 * Check the second superblock to see which is newer; then mount
905 * using the older of the two. This is necessary to ensure that
906 * the filesystem is valid if it was not unmounted cleanly.
907 */
908
909 if (dfs->dlfs_sboffs[1] &&
910 dfs->dlfs_sboffs[1] - LFS_LABELPAD / fsbsize > LFS_SBPAD / fsbsize)
911 {
912 error = bread(devvp, dfs->dlfs_sboffs[1] * (fsbsize / DEV_BSIZE),
913 LFS_SBPAD, 0, &abp);
914 if (error)
915 goto out;
916 adfs = (struct dlfs *)abp->b_data;
917
918 if (dfs->dlfs_version == 1) {
919 /* 1s resolution comparison */
920 if (adfs->dlfs_tstamp < dfs->dlfs_tstamp)
921 tdfs = adfs;
922 else
923 tdfs = dfs;
924 } else {
925 /* monotonic infinite-resolution comparison */
926 if (adfs->dlfs_serial < dfs->dlfs_serial)
927 tdfs = adfs;
928 else
929 tdfs = dfs;
930 }
931
932 /* Check the basics. */
933 if (tdfs->dlfs_magic != LFS_MAGIC ||
934 tdfs->dlfs_bsize > MAXBSIZE ||
935 tdfs->dlfs_version > LFS_VERSION ||
936 tdfs->dlfs_bsize < sizeof(struct dlfs)) {
937 DLOG((DLOG_MOUNT, "lfs_mountfs: alt superblock"
938 " sanity failed\n"));
939 error = EINVAL; /* XXX needs translation */
940 goto out;
941 }
942 } else {
943 DLOG((DLOG_MOUNT, "lfs_mountfs: invalid alt superblock"
944 " daddr=0x%x\n", dfs->dlfs_sboffs[1]));
945 error = EINVAL;
946 goto out;
947 }
948
949 /* Allocate the mount structure, copy the superblock into it. */
950 fs = kmem_zalloc(sizeof(struct lfs), KM_SLEEP);
951 memcpy(&fs->lfs_dlfs_u.u_32, tdfs, sizeof(struct dlfs));
952 fs->lfs_is64 = false;
953
954 /* Compatibility */
955 if (lfs_sb_getversion(fs) < 2) {
956 lfs_sb_setsumsize(fs, LFS_V1_SUMMARY_SIZE);
957 lfs_sb_setibsize(fs, lfs_sb_getbsize(fs));
958 lfs_sb_sets0addr(fs, lfs_sb_getsboff(fs, 0));
959 lfs_sb_settstamp(fs, lfs_sb_getotstamp(fs));
960 lfs_sb_setfsbtodb(fs, 0);
961 }
962 if (lfs_sb_getresvseg(fs) == 0)
963 lfs_sb_setresvseg(fs, MIN(lfs_sb_getminfreeseg(fs) - 1, \
964 MAX(MIN_RESV_SEGS, lfs_sb_getminfreeseg(fs) / 2 + 1)));
965
966 /*
967 * If we aren't going to be able to write meaningfully to this
968 * filesystem, and were not mounted readonly, bomb out now.
969 */
970 if (lfs_fsbtob(fs, LFS_NRESERVE(fs)) > LFS_MAX_BYTES && !ronly) {
971 DLOG((DLOG_MOUNT, "lfs_mount: to mount this filesystem read/write,"
972 " we need BUFPAGES >= %lld\n",
973 (long long)((bufmem_hiwater / bufmem_lowater) *
974 LFS_INVERSE_MAX_BYTES(
975 lfs_fsbtob(fs, LFS_NRESERVE(fs))) >> PAGE_SHIFT)));
976 kmem_free(fs, sizeof(struct lfs));
977 error = EFBIG; /* XXX needs translation */
978 goto out;
979 }
980
981 /* Before rolling forward, lock so vget will sleep for other procs */
982 if (l != NULL) {
983 fs->lfs_flags = LFS_NOTYET;
984 fs->lfs_rfpid = l->l_proc->p_pid;
985 }
986
987 ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
988 ump->um_lfs = fs;
989 ump->um_fstype = ULFS1;
990 /* ump->um_cleaner_thread = NULL; */
991 if (sizeof(struct lfs) < LFS_SBPAD) { /* XXX why? */
992 brelse(bp, BC_INVAL);
993 brelse(abp, BC_INVAL);
994 } else {
995 brelse(bp, 0);
996 brelse(abp, 0);
997 }
998 bp = NULL;
999 abp = NULL;
1000
1001
1002 /* Set up the I/O information */
1003 fs->lfs_devbsize = DEV_BSIZE;
1004 fs->lfs_iocount = 0;
1005 fs->lfs_diropwait = 0;
1006 fs->lfs_activesb = 0;
1007 lfs_sb_setuinodes(fs, 0);
1008 fs->lfs_ravail = 0;
1009 fs->lfs_favail = 0;
1010 fs->lfs_sbactive = 0;
1011
1012 /* Set up the ifile and lock aflags */
1013 fs->lfs_doifile = 0;
1014 fs->lfs_writer = 0;
1015 fs->lfs_dirops = 0;
1016 fs->lfs_nadirop = 0;
1017 fs->lfs_seglock = 0;
1018 fs->lfs_pdflush = 0;
1019 fs->lfs_sleepers = 0;
1020 fs->lfs_pages = 0;
1021 rw_init(&fs->lfs_fraglock);
1022 rw_init(&fs->lfs_iflock);
1023 cv_init(&fs->lfs_stopcv, "lfsstop");
1024
1025 /* Set the file system readonly/modify bits. */
1026 fs->lfs_ronly = ronly;
1027 if (ronly == 0)
1028 fs->lfs_fmod = 1;
1029
1030 /* ulfs-level information */
1031 fs->um_flags = 0;
1032 fs->um_bptrtodb = lfs_sb_getffshift(fs) - DEV_BSHIFT;
1033 fs->um_seqinc = lfs_sb_getfrag(fs);
1034 fs->um_nindir = lfs_sb_getnindir(fs);
1035 fs->um_lognindir = ffs(lfs_sb_getnindir(fs)) - 1;
1036 fs->um_maxsymlinklen = lfs_sb_getmaxsymlinklen(fs);
1037 fs->um_dirblksiz = LFS_DIRBLKSIZ;
1038 fs->um_maxfilesize = lfs_sb_getmaxfilesize(fs);
1039
1040 /* quota stuff */
1041 /* XXX: these need to come from the on-disk superblock to be used */
1042 fs->lfs_use_quota2 = 0;
1043 fs->lfs_quota_magic = 0;
1044 fs->lfs_quota_flags = 0;
1045 fs->lfs_quotaino[0] = 0;
1046 fs->lfs_quotaino[1] = 0;
1047
1048 /* Initialize the mount structure. */
1049 dev = devvp->v_rdev;
1050 mp->mnt_data = ump;
1051 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
1052 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_LFS);
1053 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
1054 mp->mnt_stat.f_namemax = LFS_MAXNAMLEN;
1055 mp->mnt_stat.f_iosize = lfs_sb_getbsize(fs);
1056 mp->mnt_flag |= MNT_LOCAL;
1057 mp->mnt_fs_bshift = lfs_sb_getbshift(fs);
1058 if (fs->um_maxsymlinklen > 0)
1059 mp->mnt_iflag |= IMNT_DTYPE;
1060
1061 ump->um_mountp = mp;
1062 ump->um_dev = dev;
1063 ump->um_devvp = devvp;
1064 for (i = 0; i < ULFS_MAXQUOTAS; i++)
1065 ump->um_quotas[i] = NULLVP;
1066 spec_node_setmountedfs(devvp, mp);
1067
1068 /* Set up reserved memory for pageout */
1069 lfs_setup_resblks(fs);
1070 /* Set up vdirop tailq */
1071 TAILQ_INIT(&fs->lfs_dchainhd);
1072 /* and paging tailq */
1073 TAILQ_INIT(&fs->lfs_pchainhd);
1074 /* and delayed segment accounting for truncation list */
1075 LIST_INIT(&fs->lfs_segdhd);
1076
1077 /*
1078 * We use the ifile vnode for almost every operation. Instead of
1079 * retrieving it from the hash table each time we retrieve it here,
1080 * artificially increment the reference count and keep a pointer
1081 * to it in the incore copy of the superblock.
1082 */
1083 if ((error = VFS_VGET(mp, LFS_IFILE_INUM, &vp)) != 0) {
1084 DLOG((DLOG_MOUNT, "lfs_mountfs: ifile vget failed, error=%d\n", error));
1085 goto out;
1086 }
1087 fs->lfs_ivnode = vp;
1088 vref(vp);
1089
1090 /* Set up inode bitmap and order free list */
1091 lfs_order_freelist(fs);
1092
1093 /* Set up segment usage flags for the autocleaner. */
1094 fs->lfs_nactive = 0;
1095 fs->lfs_suflags = malloc(2 * sizeof(u_int32_t *),
1096 M_SEGMENT, M_WAITOK);
1097 fs->lfs_suflags[0] = malloc(lfs_sb_getnseg(fs) * sizeof(u_int32_t),
1098 M_SEGMENT, M_WAITOK);
1099 fs->lfs_suflags[1] = malloc(lfs_sb_getnseg(fs) * sizeof(u_int32_t),
1100 M_SEGMENT, M_WAITOK);
1101 memset(fs->lfs_suflags[1], 0, lfs_sb_getnseg(fs) * sizeof(u_int32_t));
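	/*
	 * Scan the segment table, caching each segment's flags; on a
	 * read/write mount also recompute the EMPTY flags and clear
	 * stale ACTIVE/INVAL bits left over from the previous mount,
	 * writing back only the entries that actually changed.
	 */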
1102 for (i = 0; i < lfs_sb_getnseg(fs); i++) {
1103 int changed;
1104
1105 LFS_SEGENTRY(sup, fs, i, bp);
1106 changed = 0;
1107 if (!ronly) {
1108 if (sup->su_nbytes == 0 &&
1109 !(sup->su_flags & SEGUSE_EMPTY)) {
1110 sup->su_flags |= SEGUSE_EMPTY;
1111 ++changed;
1112 } else if (!(sup->su_nbytes == 0) &&
1113 (sup->su_flags & SEGUSE_EMPTY)) {
1114 sup->su_flags &= ~SEGUSE_EMPTY;
1115 ++changed;
1116 }
1117 if (sup->su_flags & (SEGUSE_ACTIVE|SEGUSE_INVAL)) {
1118 sup->su_flags &= ~(SEGUSE_ACTIVE|SEGUSE_INVAL);
1119 ++changed;
1120 }
1121 }
1122 fs->lfs_suflags[0][i] = sup->su_flags;
1123 if (changed)
1124 LFS_WRITESEGENTRY(sup, fs, i, bp);
1125 else
1126 brelse(bp, 0);
1127 }
1128
1129 /*
1130 * XXX: if the fs has quotas, quotas should be on even if
1131 * readonly. Otherwise you can't query the quota info!
1132 * However, that's not how the quota2 code got written and I
1133 * don't know if it'll behave itself if enabled while
1134 * readonly, so for now use the same enable logic as ffs.
1135 *
1136 * XXX: also, if you use the -f behavior allowed here (and
1137 * equivalently above for remount) it will corrupt the fs. It
1138 * ought not to allow that. It should allow mounting readonly
1139 * if there are quotas and the kernel doesn't have the quota
1140 * code, but only readonly.
1141 *
1142 * XXX: and if you use the -f behavior allowed here it will
1143 * likely crash at unmount time (or remount time) because we
1144 * think quotas are active.
1145 *
1146 * Although none of this applies until there's a way to set
1147 * lfs_use_quota2 and have quotas in the fs at all.
1148 */
1149 if (!ronly && fs->lfs_use_quota2) {
1150 #ifdef LFS_QUOTA2
1151 error = lfs_quota2_mount(mp);
1152 #else
1153 uprintf("%s: no kernel support for this filesystem's quotas\n",
1154 mp->mnt_stat.f_mntonname);
1155 if (mp->mnt_flag & MNT_FORCE) {
1156 uprintf("%s: mounting anyway; fsck afterwards\n",
1157 mp->mnt_stat.f_mntonname);
1158 } else {
1159 error = EINVAL;
1160 }
1161 #endif
1162 if (error) {
1163 /* XXX XXX must clean up the stuff immediately above */
1164 printf("lfs_mountfs: sorry, leaking some memory\n");
1165 goto out;
1166 }
1167 }
1168
1169 #ifdef LFS_EXTATTR
1170 /*
1171 * Initialize file-backed extended attributes for ULFS1 file
1172 * systems.
1173 *
1174 * XXX: why is this limited to ULFS1?
1175 */
1176 if (ump->um_fstype == ULFS1) {
1177 ulfs_extattr_uepm_init(&ump->um_extattr);
1178 }
1179 #endif
1180
1181 #ifdef LFS_KERNEL_RFW
1182 lfs_roll_forward(fs, mp, l);
1183 #endif
1184
1185 /* If writing, sb is not clean; record in case of immediate crash */
1186 if (!fs->lfs_ronly) {
1187 lfs_sb_setpflags(fs, lfs_sb_getpflags(fs) & ~LFS_PF_CLEAN);
1188 lfs_writesuper(fs, lfs_sb_getsboff(fs, 0));
1189 lfs_writesuper(fs, lfs_sb_getsboff(fs, 1));
1190 }
1191
1192 /* Allow vget now that roll-forward is complete */
1193 fs->lfs_flags &= ~(LFS_NOTYET);
1194 wakeup(&fs->lfs_flags);
1195
1196 /*
1197 * Initialize the ifile cleaner info with information from
1198 * the superblock.
1199 */
1200 LFS_CLEANERINFO(cip, fs, bp);
1201 lfs_ci_setclean(fs, cip, lfs_sb_getnclean(fs));
1202 lfs_ci_setdirty(fs, cip, lfs_sb_getnseg(fs) - lfs_sb_getnclean(fs));
1203 lfs_ci_setavail(fs, cip, lfs_sb_getavail(fs));
1204 lfs_ci_setbfree(fs, cip, lfs_sb_getbfree(fs));
1205 (void) LFS_BWRITE_LOG(bp); /* Ifile */
1206
1207 /*
1208 * Mark the current segment as ACTIVE, since we're going to
1209 * be writing to it.
1210 */
1211 LFS_SEGENTRY(sup, fs, lfs_dtosn(fs, lfs_sb_getoffset(fs)), bp);
1212 sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
1213 fs->lfs_nactive++;
1214 LFS_WRITESEGENTRY(sup, fs, lfs_dtosn(fs, lfs_sb_getoffset(fs)), bp); /* Ifile */
1215
1216 /* Now that roll-forward is done, unlock the Ifile */
1217 vput(vp);
1218
1219 /* Start the pagedaemon-anticipating daemon */
1220 mutex_enter(&lfs_lock);
1221 if (lfs_writer_daemon == 0 && lfs_writer_lid == 0 &&
1222 kthread_create(PRI_BIO, 0, NULL,
1223 lfs_writerd, NULL, NULL, "lfs_writer") != 0)
1224 panic("fork lfs_writer");
1225 mutex_exit(&lfs_lock);
1226
1227 printf("WARNING: the log-structured file system is experimental\n"
1228 "WARNING: it may cause system crashes and/or corrupt data\n");
1229
1230 return (0);
1231
1232 out:
1233 if (bp)
1234 brelse(bp, 0);
1235 if (abp)
1236 brelse(abp, 0);
1237 if (ump) {
1238 kmem_free(ump->um_lfs, sizeof(struct lfs));
1239 kmem_free(ump, sizeof(*ump));
1240 mp->mnt_data = NULL;
1241 }
1242
1243 return (error);
1244 }
1245
1246 /*
1247 * unmount system call
1248 */
1249 int
1250 lfs_unmount(struct mount *mp, int mntflags)
1251 {
1252 struct lwp *l = curlwp;
1253 struct ulfsmount *ump;
1254 struct lfs *fs;
1255 int error, flags, ronly;
1256 vnode_t *vp;
1257
1258 flags = 0;
1259 if (mntflags & MNT_FORCE)
1260 flags |= FORCECLOSE;
1261
1262 ump = VFSTOULFS(mp);
1263 fs = ump->um_lfs;
1264
1265 /* Two checkpoints */
1266 lfs_segwrite(mp, SEGM_CKP | SEGM_SYNC);
1267 lfs_segwrite(mp, SEGM_CKP | SEGM_SYNC);
1268
1269 /* wake up the cleaner so it can die */
1270 /* XXX: shouldn't this be *after* the error cases below? */
1271 lfs_wakeup_cleaner(fs);
1272 mutex_enter(&lfs_lock);
1273 while (fs->lfs_sleepers)
1274 mtsleep(&fs->lfs_sleepers, PRIBIO + 1, "lfs_sleepers", 0,
1275 &lfs_lock);
1276 mutex_exit(&lfs_lock);
1277
1278 #ifdef LFS_EXTATTR
1279 if (ump->um_fstype == ULFS1) {
1280 if (ump->um_extattr.uepm_flags & ULFS_EXTATTR_UEPM_STARTED) {
1281 ulfs_extattr_stop(mp, curlwp);
1282 }
1283 if (ump->um_extattr.uepm_flags & ULFS_EXTATTR_UEPM_INITIALIZED) {
1284 ulfs_extattr_uepm_destroy(&ump->um_extattr);
1285 }
1286 }
1287 #endif
1288 #ifdef LFS_QUOTA
1289 if ((error = lfsquota1_umount(mp, flags)) != 0)
1290 return (error);
1291 #endif
1292 #ifdef LFS_QUOTA2
1293 if ((error = lfsquota2_umount(mp, flags)) != 0)
1294 return (error);
1295 #endif
1296 if ((error = vflush(mp, fs->lfs_ivnode, flags)) != 0)
1297 return (error);
1298 if ((error = VFS_SYNC(mp, 1, l->l_cred)) != 0)
1299 return (error);
1300 vp = fs->lfs_ivnode;
1301 mutex_enter(vp->v_interlock);
1302 if (LIST_FIRST(&vp->v_dirtyblkhd))
1303 panic("lfs_unmount: still dirty blocks on ifile vnode");
1304 mutex_exit(vp->v_interlock);
1305
1306 /* Explicitly write the superblock, to update serial and pflags */
1307 lfs_sb_setpflags(fs, lfs_sb_getpflags(fs) | LFS_PF_CLEAN);
1308 lfs_writesuper(fs, lfs_sb_getsboff(fs, 0));
1309 lfs_writesuper(fs, lfs_sb_getsboff(fs, 1));
1310 mutex_enter(&lfs_lock);
1311 while (fs->lfs_iocount)
1312 mtsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs_umount", 0,
1313 &lfs_lock);
1314 mutex_exit(&lfs_lock);
1315
1316 /* Finish with the Ifile, now that we're done with it */
1317 vgone(fs->lfs_ivnode);
1318
1319 ronly = !fs->lfs_ronly;
1320 if (ump->um_devvp->v_type != VBAD)
1321 spec_node_setmountedfs(ump->um_devvp, NULL);
1322 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1323 error = VOP_CLOSE(ump->um_devvp,
1324 ronly ? FREAD : FREAD|FWRITE, NOCRED);
1325 vput(ump->um_devvp);
1326
1327 /* Complain about page leakage */
1328 if (fs->lfs_pages > 0)
1329 printf("lfs_unmount: still claim %d pages (%d in subsystem)\n",
1330 fs->lfs_pages, lfs_subsys_pages);
1331
1332 /* Free per-mount data structures */
1333 free(fs->lfs_ino_bitmap, M_SEGMENT);
1334 free(fs->lfs_suflags[0], M_SEGMENT);
1335 free(fs->lfs_suflags[1], M_SEGMENT);
1336 free(fs->lfs_suflags, M_SEGMENT);
1337 lfs_free_resblks(fs);
1338 cv_destroy(&fs->lfs_stopcv);
1339 rw_destroy(&fs->lfs_fraglock);
1340 rw_destroy(&fs->lfs_iflock);
1341
1342 kmem_free(fs, sizeof(struct lfs));
1343 kmem_free(ump, sizeof(*ump));
1344
1345 mp->mnt_data = NULL;
1346 mp->mnt_flag &= ~MNT_LOCAL;
1347 return (error);
1348 }
1349
1350 /*
1351 * Get file system statistics.
1352 *
1353 * NB: We don't lock to access the superblock here, because it's not
1354 * really that important if we get it wrong.
1355 */
1356 int
1357 lfs_statvfs(struct mount *mp, struct statvfs *sbp)
1358 {
1359 struct lfs *fs;
1360 struct ulfsmount *ump;
1361
1362 ump = VFSTOULFS(mp);
1363 fs = ump->um_lfs;
1364
1365 sbp->f_bsize = lfs_sb_getbsize(fs);
1366 sbp->f_frsize = lfs_sb_getfsize(fs);
1367 sbp->f_iosize = lfs_sb_getbsize(fs);
1368 sbp->f_blocks = LFS_EST_NONMETA(fs) - VTOI(fs->lfs_ivnode)->i_lfs_effnblks;
1369
1370 sbp->f_bfree = LFS_EST_BFREE(fs);
1371 /*
1372 * XXX this should be lfs_sb_getsize (measured in frags)
1373 * rather than dsize (measured in diskblocks). However,
1374 * getsize needs a format version check (for version 1 it
1375 * needs to be blockstofrags'd) so for the moment I'm going to
1376 * leave this... it won't fire wrongly as frags are at least
1377 * as big as diskblocks.
1378 */
1379 KASSERT(sbp->f_bfree <= lfs_sb_getdsize(fs));
1380 #if 0
1381 if (sbp->f_bfree < 0)
1382 sbp->f_bfree = 0;
1383 #endif
1384
1385 sbp->f_bresvd = LFS_EST_RSVD(fs);
1386 if (sbp->f_bfree > sbp->f_bresvd)
1387 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1388 else
1389 sbp->f_bavail = 0;
1390
1391 /* XXX: huh? - dholland 20150728 */
1392 sbp->f_files = lfs_sb_getbfree(fs) / lfs_btofsb(fs, lfs_sb_getibsize(fs))
1393 * LFS_INOPB(fs);
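	/*
	 * (The above is apparently an estimate of how many more inodes
	 * could be created: LFS has no fixed inode table, so count the
	 * inodes that would fit in the remaining free blocks.)
	 */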
1394 sbp->f_ffree = sbp->f_files - lfs_sb_getnfiles(fs);
1395 sbp->f_favail = sbp->f_ffree;
1396 sbp->f_fresvd = 0;
1397 copy_statvfs_info(sbp, mp);
1398 return (0);
1399 }
1400
1401 /*
1402 * Go through the disk queues to initiate sandbagged IO;
1403 * go through the inodes to write those that have been modified;
1404 * initiate the writing of the super block if it has been modified.
1405 *
1406 * Note: we are always called with the filesystem marked `MPBUSY'.
1407 */
1408 int
1409 lfs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
1410 {
1411 int error;
1412 struct lfs *fs;
1413
1414 fs = VFSTOULFS(mp)->um_lfs;
1415 if (fs->lfs_ronly)
1416 return 0;
1417
1418 /* Snapshots should not hose the syncer */
1419 /*
1420 * XXX Sync can block here anyway, since we don't have a very
1421 * XXX good idea of how much data is pending. If it's more
1422 * XXX than a segment and lfs_nextseg is close to the end of
1423 * XXX the log, we'll likely block.
1424 */
1425 mutex_enter(&lfs_lock);
1426 if (fs->lfs_nowrap && lfs_sb_getnextseg(fs) < lfs_sb_getcurseg(fs)) {
1427 mutex_exit(&lfs_lock);
1428 return 0;
1429 }
1430 mutex_exit(&lfs_lock);
1431
1432 lfs_writer_enter(fs, "lfs_dirops");
1433
1434 /* All syncs must be checkpoints until roll-forward is implemented. */
1435 DLOG((DLOG_FLUSH, "lfs_sync at 0x%jx\n",
1436 (uintmax_t)lfs_sb_getoffset(fs)));
1437 error = lfs_segwrite(mp, SEGM_CKP | (waitfor ? SEGM_SYNC : 0));
1438 lfs_writer_leave(fs);
1439 #ifdef LFS_QUOTA
1440 lfs_qsync(mp);
1441 #endif
1442 return (error);
1443 }
1444
1445 /*
1446 * Look up an LFS dinode number to find its incore vnode. If not already
1447 * in core, read it in from the specified device. Return the inode locked.
1448 * Detection and handling of mount points must be done by the calling routine.
1449 */
1450 int
1451 lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
1452 {
1453 int error;
1454
1455 error = vcache_get(mp, &ino, sizeof(ino), vpp);
1456 if (error)
1457 return error;
1458 error = vn_lock(*vpp, LK_EXCLUSIVE);
1459 if (error) {
1460 vrele(*vpp);
1461 *vpp = NULL;
1462 return error;
1463 }
1464
1465 return 0;
1466 }
1467
1468 /*
1469 * Create a new vnode/inode pair and initialize what fields we can.
1470 */
1471 static void
1472 lfs_init_vnode(struct ulfsmount *ump, ino_t ino, struct vnode *vp)
1473 {
1474 struct inode *ip;
1475 struct ulfs1_dinode *dp;
1476
1477 ASSERT_NO_SEGLOCK(ump->um_lfs);
1478
1479 /* Initialize the inode. */
1480 ip = pool_get(&lfs_inode_pool, PR_WAITOK);
1481 memset(ip, 0, sizeof(*ip));
1482 dp = pool_get(&lfs_dinode_pool, PR_WAITOK);
1483 memset(dp, 0, sizeof(*dp));
1484 ip->inode_ext.lfs = pool_get(&lfs_inoext_pool, PR_WAITOK);
1485 memset(ip->inode_ext.lfs, 0, sizeof(*ip->inode_ext.lfs));
1486 ip->i_din.ffs1_din = dp;
1487 ip->i_ump = ump;
1488 ip->i_vnode = vp;
1489 ip->i_dev = ump->um_dev;
1490 ip->i_number = dp->di_inumber = ino;
1491 ip->i_lfs = ump->um_lfs;
1492 ip->i_lfs_effnblks = 0;
1493 SPLAY_INIT(&ip->i_lfs_lbtree);
1494 ip->i_lfs_nbtree = 0;
1495 LIST_INIT(&ip->i_lfs_segdhd);
1496
1497 vp->v_tag = VT_LFS;
1498 vp->v_op = lfs_vnodeop_p;
1499 vp->v_data = ip;
1500 }
1501
1502 /*
1503 * Undo lfs_init_vnode().
1504 */
1505 static void
1506 lfs_deinit_vnode(struct ulfsmount *ump, struct vnode *vp)
1507 {
1508 struct inode *ip = VTOI(vp);
1509
1510 pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
1511 pool_put(&lfs_dinode_pool, ip->i_din.ffs1_din);
1512 pool_put(&lfs_inode_pool, ip);
1513 vp->v_data = NULL;
1514 }
1515
1516 /*
1517 * Read an inode from disk and initialize this vnode / inode pair.
1518 * Caller assures no other thread will try to load this inode.
1519 */
1520 int
1521 lfs_loadvnode(struct mount *mp, struct vnode *vp,
1522 const void *key, size_t key_len, const void **new_key)
1523 {
1524 struct lfs *fs;
1525 struct ulfs1_dinode *dip;
1526 struct inode *ip;
1527 struct buf *bp;
1528 IFILE *ifp;
1529 struct ulfsmount *ump;
1530 ino_t ino;
1531 daddr_t daddr;
1532 int error, retries;
1533 struct timespec ts;
1534
1535 KASSERT(key_len == sizeof(ino));
1536 memcpy(&ino, key, key_len);
1537
1538 memset(&ts, 0, sizeof ts); /* XXX gcc */
1539
1540 ump = VFSTOULFS(mp);
1541 fs = ump->um_lfs;
1542
1543 /*
1544 * If the filesystem is not completely mounted yet, suspend
1545 * any access requests (wait for roll-forward to complete).
1546 */
1547 mutex_enter(&lfs_lock);
1548 while ((fs->lfs_flags & LFS_NOTYET) && curproc->p_pid != fs->lfs_rfpid)
1549 mtsleep(&fs->lfs_flags, PRIBIO+1, "lfs_notyet", 0,
1550 &lfs_lock);
1551 mutex_exit(&lfs_lock);
1552
1553 /* Translate the inode number to a disk address. */
1554 if (ino == LFS_IFILE_INUM)
1555 daddr = lfs_sb_getidaddr(fs);
1556 else {
1557 /* XXX bounds-check this too */
1558 LFS_IENTRY(ifp, fs, ino, bp);
1559 daddr = lfs_if_getdaddr(fs, ifp);
1560 if (lfs_sb_getversion(fs) > 1) {
1561 ts.tv_sec = lfs_if_getatime_sec(fs, ifp);
1562 ts.tv_nsec = lfs_if_getatime_nsec(fs, ifp);
1563 }
1564
1565 brelse(bp, 0);
1566 if (daddr == LFS_UNUSED_DADDR)
1567 return (ENOENT);
1568 }
1569
1570 /* Allocate/init new vnode/inode. */
1571 lfs_init_vnode(ump, ino, vp);
1572 ip = VTOI(vp);
1573
1574 /* If the cleaner supplied the inode, use it. */
1575 if (curlwp == ump->um_cleaner_thread && ump->um_cleaner_hint != NULL &&
1576 ump->um_cleaner_hint->bi_lbn == LFS_UNUSED_LBN) {
1577 dip = ump->um_cleaner_hint->bi_bp;
1578 error = copyin(dip, ip->i_din.ffs1_din,
1579 sizeof(struct ulfs1_dinode));
1580 if (error) {
1581 lfs_deinit_vnode(ump, vp);
1582 return error;
1583 }
1584 KASSERT(ip->i_number == ino);
1585 goto out;
1586 }
1587
1588 /* Read in the disk contents for the inode, copy into the inode. */
1589 retries = 0;
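	/*
	 * The inode block named by the ifile entry may still be in
	 * transit as part of a partial-segment write that has not
	 * completed; if lfs_ifind() misses, wait on lfs_iocount and
	 * retry a bounded number of times before giving up.
	 */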
1590 again:
1591 error = bread(ump->um_devvp, LFS_FSBTODB(fs, daddr),
1592 (lfs_sb_getversion(fs) == 1 ? lfs_sb_getbsize(fs) : lfs_sb_getibsize(fs)),
1593 0, &bp);
1594 if (error) {
1595 lfs_deinit_vnode(ump, vp);
1596 return error;
1597 }
1598
1599 dip = lfs_ifind(fs, ino, bp);
1600 if (dip == NULL) {
1601 /* Assume write has not completed yet; try again */
1602 brelse(bp, BC_INVAL);
1603 ++retries;
1604 if (retries <= LFS_IFIND_RETRIES) {
1605 mutex_enter(&lfs_lock);
1606 if (fs->lfs_iocount) {
1607 DLOG((DLOG_VNODE,
1608 "%s: dinode %d not found, retrying...\n",
1609 __func__, ino));
1610 (void)mtsleep(&fs->lfs_iocount, PRIBIO + 1,
1611 "lfs ifind", 1, &lfs_lock);
1612 } else
1613 retries = LFS_IFIND_RETRIES;
1614 mutex_exit(&lfs_lock);
1615 goto again;
1616 }
1617 #ifdef DEBUG
1618 /* If the seglock is held look at the bpp to see
1619 what is there anyway */
1620 mutex_enter(&lfs_lock);
1621 if (fs->lfs_seglock > 0) {
1622 struct buf **bpp;
1623 struct ulfs1_dinode *dp;
1624 int i;
1625
1626 for (bpp = fs->lfs_sp->bpp;
1627 bpp != fs->lfs_sp->cbpp; ++bpp) {
1628 if ((*bpp)->b_vp == fs->lfs_ivnode &&
1629 bpp != fs->lfs_sp->bpp) {
1630 /* Inode block */
1631 printf("%s: block 0x%" PRIx64 ": ",
1632 __func__, (*bpp)->b_blkno);
1633 dp = (struct ulfs1_dinode *)
1634 (*bpp)->b_data;
1635 for (i = 0; i < LFS_INOPB(fs); i++)
1636 if (dp[i].di_inumber)
1637 printf("%d ",
1638 dp[i].di_inumber);
1639 printf("\n");
1640 }
1641 }
1642 }
1643 mutex_exit(&lfs_lock);
1644 #endif /* DEBUG */
1645 panic("lfs_loadvnode: dinode not found");
1646 }
1647 *ip->i_din.ffs1_din = *dip;
1648 brelse(bp, 0);
1649
1650 out:
1651 if (lfs_sb_getversion(fs) > 1) {
1652 ip->i_ffs1_atime = ts.tv_sec;
1653 ip->i_ffs1_atimensec = ts.tv_nsec;
1654 }
1655
1656 lfs_vinit(mp, &vp);
1657
1658 *new_key = &ip->i_number;
1659 return 0;
1660 }
1661
1662 /*
1663 * Create a new inode and initialize this vnode / inode pair.
1664 */
1665 int
1666 lfs_newvnode(struct mount *mp, struct vnode *dvp, struct vnode *vp,
1667 struct vattr *vap, kauth_cred_t cred,
1668 size_t *key_len, const void **new_key)
1669 {
1670 ino_t ino;
1671 struct inode *ip;
1672 struct ulfsmount *ump;
1673 struct lfs *fs;
1674 int error, mode, gen;
1675
1676 KASSERT(dvp != NULL || vap->va_fileid > 0);
1677 KASSERT(dvp == NULL || dvp->v_mount == mp);
1678 KASSERT(vap->va_type != VNON);
1679
1680 *key_len = sizeof(ino);
1681 ump = VFSTOULFS(mp);
1682 fs = ump->um_lfs;
1683 mode = MAKEIMODE(vap->va_type, vap->va_mode);
1684
1685 /*
1686 * Allocate fresh inode. With "dvp == NULL" take the inode number
1687 * and version from "vap".
1688 */
1689 if (dvp == NULL) {
1690 ino = vap->va_fileid;
1691 gen = vap->va_gen;
1692 error = lfs_valloc_fixed(fs, ino, gen);
1693 } else {
1694 error = lfs_valloc(dvp, mode, cred, &ino, &gen);
1695 }
1696 if (error)
1697 return error;
1698
1699 /* Attach inode to vnode. */
1700 lfs_init_vnode(ump, ino, vp);
1701 ip = VTOI(vp);
1702
1703 mutex_enter(&lfs_lock);
1704 LFS_SET_UINO(ip, IN_CHANGE);
1705 mutex_exit(&lfs_lock);
1706
1707 /* Note no blocks yet */
1708 ip->i_lfs_hiblk = -1;
1709
1710 /* Set a new generation number for this inode. */
1711 ip->i_gen = gen;
1712 ip->i_ffs1_gen = gen;
1713
1714 memset(ip->i_lfs_fragsize, 0,
1715 ULFS_NDADDR * sizeof(*ip->i_lfs_fragsize));
1716
1717 /* Set uid / gid. */
1718 if (cred == NOCRED || cred == FSCRED) {
1719 ip->i_gid = 0;
1720 ip->i_uid = 0;
1721 } else {
1722 ip->i_gid = VTOI(dvp)->i_gid;
1723 ip->i_uid = kauth_cred_geteuid(cred);
1724 }
1725 DIP_ASSIGN(ip, gid, ip->i_gid);
1726 DIP_ASSIGN(ip, uid, ip->i_uid);
1727
1728 #if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
1729 error = lfs_chkiq(ip, 1, cred, 0);
1730 if (error) {
1731 lfs_vfree(dvp, ino, mode);
1732 lfs_deinit_vnode(ump, vp);
1733
1734 return error;
1735 }
1736 #endif
1737
1738 /* Set type and finalize. */
1739 ip->i_flags = 0;
1740 DIP_ASSIGN(ip, flags, 0);
1741 ip->i_mode = mode;
1742 DIP_ASSIGN(ip, mode, mode);
1743 if (vap->va_rdev != VNOVAL) {
1744 /*
1745 * Want to be able to use this to make badblock
1746 * inodes, so don't truncate the dev number.
1747 */
1748 if (ump->um_fstype == ULFS1)
1749 ip->i_ffs1_rdev = ulfs_rw32(vap->va_rdev,
1750 ULFS_MPNEEDSWAP(fs));
1751 else
1752 ip->i_ffs2_rdev = ulfs_rw64(vap->va_rdev,
1753 ULFS_MPNEEDSWAP(fs));
1754 }
1755 lfs_vinit(mp, &vp);
1756
1757 *new_key = &ip->i_number;
1758 return 0;
1759 }
1760
1761 /*
1762 * File handle to vnode
1763 */
1764 int
1765 lfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1766 {
1767 struct lfid lfh;
1768 struct lfs *fs;
1769
1770 if (fhp->fid_len != sizeof(struct lfid))
1771 return EINVAL;
1772
1773 memcpy(&lfh, fhp, sizeof(lfh));
1774 if (lfh.lfid_ino < LFS_IFILE_INUM)
1775 return ESTALE;
1776
1777 fs = VFSTOULFS(mp)->um_lfs;
1778 if (lfh.lfid_ident != lfs_sb_getident(fs))
1779 return ESTALE;
1780
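	/* Reject inode numbers beyond the current end of the ifile. */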
1781 if (lfh.lfid_ino >
1782 ((VTOI(fs->lfs_ivnode)->i_ffs1_size >> lfs_sb_getbshift(fs)) -
1783 lfs_sb_getcleansz(fs) - lfs_sb_getsegtabsz(fs)) * lfs_sb_getifpb(fs))
1784 return ESTALE;
1785
1786 return (ulfs_fhtovp(mp, &lfh.lfid_ufid, vpp));
1787 }
1788
1789 /*
1790 * Vnode pointer to File handle
1791 */
1792 /* ARGSUSED */
1793 int
1794 lfs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
1795 {
1796 struct inode *ip;
1797 struct lfid lfh;
1798
1799 if (*fh_size < sizeof(struct lfid)) {
1800 *fh_size = sizeof(struct lfid);
1801 return E2BIG;
1802 }
1803 *fh_size = sizeof(struct lfid);
1804 ip = VTOI(vp);
1805 memset(&lfh, 0, sizeof(lfh));
1806 lfh.lfid_len = sizeof(struct lfid);
1807 lfh.lfid_ino = ip->i_number;
1808 lfh.lfid_gen = ip->i_gen;
1809 lfh.lfid_ident = lfs_sb_getident(ip->i_lfs);
1810 memcpy(fhp, &lfh, sizeof(lfh));
1811 return (0);
1812 }
1813
1814 /*
1815 * ulfs_bmaparray callback function for writing.
1816 *
1817 * Since blocks will be written to the new segment anyway,
1818 * we don't care about current daddr of them.
1819 */
1820 static bool
1821 lfs_issequential_hole(const struct lfs *fs,
1822 daddr_t daddr0, daddr_t daddr1)
1823 {
1824 (void)fs; /* not used */
1825
1826 daddr0 = (daddr_t)((int32_t)daddr0); /* XXX ondisk32 */
1827 daddr1 = (daddr_t)((int32_t)daddr1); /* XXX ondisk32 */
1828
1829 KASSERT(daddr0 == UNWRITTEN ||
1830 (0 <= daddr0 && daddr0 <= LFS_MAX_DADDR(fs)));
1831 KASSERT(daddr1 == UNWRITTEN ||
1832 (0 <= daddr1 && daddr1 <= LFS_MAX_DADDR(fs)));
1833
1834 /* NOTE: all we want to know here is 'hole or not'. */
1835 /* NOTE: UNASSIGNED is converted to 0 by ulfs_bmaparray. */
1836
1837 /*
1838 * treat UNWRITTENs and all resident blocks as 'contiguous'
1839 */
1840 if (daddr0 != 0 && daddr1 != 0)
1841 return true;
1842
1843 /*
1844 * both are in hole?
1845 */
1846 if (daddr0 == 0 && daddr1 == 0)
1847 return true; /* all holes are 'contiguous' for us. */
1848
1849 return false;
1850 }
1851
1852 /*
1853 * lfs_gop_write functions exactly like genfs_gop_write, except that
1854 * (1) it requires the seglock to be held by its caller, and sp->fip
1855 * to be properly initialized (it will return without re-initializing
1856 * sp->fip, and without calling lfs_writeseg).
1857 * (2) it uses the remaining space in the segment, rather than VOP_BMAP,
1858 * to determine how large a block it can write at once (though it does
1859 * still use VOP_BMAP to find holes in the file);
1860 * (3) it calls lfs_gatherblock instead of VOP_STRATEGY on its blocks
1861 * (leaving lfs_writeseg to deal with the cluster blocks, so we might
1862 * now have clusters of clusters, ick.)
1863 */
1864 static int
1865 lfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
1866 int flags)
1867 {
1868 int i, error, run, haveeof = 0;
1869 int fs_bshift;
1870 vaddr_t kva;
1871 off_t eof, offset, startoffset = 0;
1872 size_t bytes, iobytes, skipbytes;
1873 bool async = (flags & PGO_SYNCIO) == 0;
1874 daddr_t lbn, blkno;
1875 struct vm_page *pg;
1876 struct buf *mbp, *bp;
1877 struct vnode *devvp = VTOI(vp)->i_devvp;
1878 struct inode *ip = VTOI(vp);
1879 struct lfs *fs = ip->i_lfs;
1880 struct segment *sp = fs->lfs_sp;
1881 SEGSUM *ssp;
1882 UVMHIST_FUNC("lfs_gop_write"); UVMHIST_CALLED(ubchist);
1883 const char * failreason = NULL;
1884
1885 ASSERT_SEGLOCK(fs);
1886
1887 /* The Ifile lives in the buffer cache */
1888 KASSERT(vp != fs->lfs_ivnode);
1889
1890 /*
1891 * We don't want to fill the disk before the cleaner has a chance
1892 * to make room for us. If we're in danger of doing that, fail
1893 * with EAGAIN. The caller will have to notice this, unlock
1894 * so the cleaner can run, relock and try again.
1895 *
1896 * We must write everything, however, if our vnode is being
1897 * reclaimed.
1898 */
1899 mutex_enter(vp->v_interlock);
1900 if (LFS_STARVED_FOR_SEGS(fs) && vdead_check(vp, VDEAD_NOWAIT) == 0) {
1901 mutex_exit(vp->v_interlock);
1902 failreason = "Starved for segs and not flushing vp";
1903 goto tryagain;
1904 }
1905 mutex_exit(vp->v_interlock);
1906
1907 /*
1908 * Sometimes things slip past the filters in lfs_putpages,
1909 * and the pagedaemon tries to write pages---problem is
1910 * that the pagedaemon never acquires the segment lock.
1911 *
1912 * Alternatively, pages that were clean when we called
1913 * genfs_putpages may have become dirty in the meantime. In this
1914 * case the segment header is not properly set up for blocks
1915 * to be added to it.
1916 *
1917 * Unbusy and unclean the pages, and put them on the ACTIVE
1918 * queue under the hypothesis that they couldn't have got here
1919 * unless they were modified *quite* recently.
1920 *
1921 * XXXUBC that last statement is an oversimplification of course.
1922 */
1923 if (!LFS_SEGLOCK_HELD(fs)) {
1924 failreason = "Seglock not held";
1925 goto tryagain;
1926 }
1927 if (ip->i_lfs_iflags & LFSI_NO_GOP_WRITE) {
1928 failreason = "Inode with no_gop_write";
1929 goto tryagain;
1930 }
1931 if ((pgs[0]->offset & lfs_sb_getbmask(fs)) != 0) {
1932 failreason = "Bad page offset";
1933 goto tryagain;
1934 }
1935
1936 UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1937 vp, pgs, npages, flags);
1938
1939 GOP_SIZE(vp, vp->v_size, &eof, 0);
1940 haveeof = 1;
1941
1942 if (vp->v_type == VREG)
1943 fs_bshift = vp->v_mount->mnt_fs_bshift;
1944 else
1945 fs_bshift = DEV_BSHIFT;
1946 error = 0;
1947 pg = pgs[0];
1948 startoffset = pg->offset;
1949 KASSERT(eof >= 0);
1950
1951 if (startoffset >= eof) {
1952 failreason = "Offset beyond EOF";
1953 goto tryagain;
1954 } else
1955 bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
1956 skipbytes = 0;
1957
1958 KASSERT(bytes != 0);
1959
1960 	/* Swap PG_DELWRI for PG_PAGEOUT: account the pageout and unwire */
1961 for (i = 0; i < npages; i++) {
1962 if (pgs[i]->flags & PG_DELWRI) {
1963 KASSERT(!(pgs[i]->flags & PG_PAGEOUT));
1964 pgs[i]->flags &= ~PG_DELWRI;
1965 pgs[i]->flags |= PG_PAGEOUT;
1966 uvm_pageout_start(1);
1967 mutex_enter(vp->v_interlock);
1968 mutex_enter(&uvm_pageqlock);
1969 uvm_pageunwire(pgs[i]);
1970 mutex_exit(&uvm_pageqlock);
1971 mutex_exit(vp->v_interlock);
1972 }
1973 }
1974
1975 /*
1976 * Check to make sure we're starting on a block boundary.
1977 * We'll check later to make sure we always write entire
1978 * blocks (or fragments).
1979 */
1980 if (startoffset & lfs_sb_getbmask(fs))
1981 printf("%" PRId64 " & %" PRIu64 " = %" PRId64 "\n",
1982 startoffset, lfs_sb_getbmask(fs),
1983 startoffset & lfs_sb_getbmask(fs));
1984 KASSERT((startoffset & lfs_sb_getbmask(fs)) == 0);
1985 if (bytes & lfs_sb_getffmask(fs)) {
1986 printf("lfs_gop_write: asked to write %ld bytes\n", (long)bytes);
1987 panic("lfs_gop_write: non-integer blocks");
1988 }
1989
1990 /*
1991 * We could deadlock here on pager_map with UVMPAGER_MAPIN_WAITOK.
1992 	 * If we would deadlock, write what we have and try again. If we
1993 	 * have nothing to write, we'll have to sleep.
1994 */
1995 ssp = (SEGSUM *)sp->segsum;
1996 if ((kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WRITE |
1997 (lfs_ss_getnfinfo(fs, ssp) < 1 ?
1998 UVMPAGER_MAPIN_WAITOK : 0))) == 0x0) {
1999 DLOG((DLOG_PAGE, "lfs_gop_write: forcing write\n"));
2000 #if 0
2001 " with nfinfo=%d at offset 0x%jx\n",
2002 (int)lfs_ss_getnfinfo(fs, ssp),
2003 (uintmax_t)lfs_sb_getoffset(fs)));
2004 #endif
2005 lfs_updatemeta(sp);
2006 lfs_release_finfo(fs);
2007 (void) lfs_writeseg(fs, sp);
2008
2009 lfs_acquire_finfo(fs, ip->i_number, ip->i_gen);
2010
2011 /*
2012 * Having given up all of the pager_map we were holding,
2013 * we can now wait for aiodoned to reclaim it for us
2014 * without fear of deadlock.
2015 */
2016 kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WRITE |
2017 UVMPAGER_MAPIN_WAITOK);
2018 }
2019
2020 mbp = getiobuf(NULL, true);
2021 UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
2022 vp, mbp, vp->v_numoutput, bytes);
2023 mbp->b_bufsize = npages << PAGE_SHIFT;
2024 mbp->b_data = (void *)kva;
2025 mbp->b_resid = mbp->b_bcount = bytes;
2026 mbp->b_cflags = BC_BUSY|BC_AGE;
2027 mbp->b_iodone = uvm_aio_biodone;
2028
2029 bp = NULL;
2030 for (offset = startoffset;
2031 bytes > 0;
2032 offset += iobytes, bytes -= iobytes) {
2033 lbn = offset >> fs_bshift;
2034 error = ulfs_bmaparray(vp, lbn, &blkno, NULL, NULL, &run,
2035 lfs_issequential_hole);
2036 if (error) {
2037 UVMHIST_LOG(ubchist, "ulfs_bmaparray() -> %d",
2038 error,0,0,0);
2039 skipbytes += bytes;
2040 bytes = 0;
2041 break;
2042 }
2043
2044 iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
2045 bytes);
2046 if (blkno == (daddr_t)-1) {
2047 skipbytes += iobytes;
2048 continue;
2049 }
2050
2051 /*
2052 * Discover how much we can really pack into this buffer.
2053 */
2054 /* If no room in the current segment, finish it up */
2055 if (sp->sum_bytes_left < sizeof(int32_t) ||
2056 sp->seg_bytes_left < (1 << lfs_sb_getbshift(fs))) {
2057 int vers;
2058
2059 lfs_updatemeta(sp);
2060 vers = lfs_fi_getversion(fs, sp->fip);
2061 lfs_release_finfo(fs);
2062 (void) lfs_writeseg(fs, sp);
2063
2064 lfs_acquire_finfo(fs, ip->i_number, vers);
2065 }
2066 /* Check both for space in segment and space in segsum */
2067 iobytes = MIN(iobytes, (sp->seg_bytes_left >> fs_bshift)
2068 << fs_bshift);
2069 iobytes = MIN(iobytes, (sp->sum_bytes_left / sizeof(int32_t))
2070 << fs_bshift);
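		/*
		 * Worked example with hypothetical numbers: for 8KB
		 * blocks (fs_bshift == 13), seg_bytes_left == 20KB and
		 * sum_bytes_left == 6, the first clamp allows two whole
		 * blocks (16KB) but the second only one (each block
		 * costs one int32_t of segsum space), so iobytes is 8KB.
		 */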
2071 KASSERT(iobytes > 0);
2072
2073 /* if it's really one i/o, don't make a second buf */
2074 if (offset == startoffset && iobytes == bytes) {
2075 bp = mbp;
2076 /*
2077 * All the LFS output is done by the segwriter. It
2078 * will increment numoutput by one for all the bufs it
2079 			 * receives. However this buffer needs one extra to
2080 * account for aiodone.
2081 */
2082 mutex_enter(vp->v_interlock);
2083 vp->v_numoutput++;
2084 mutex_exit(vp->v_interlock);
2085 } else {
2086 bp = getiobuf(NULL, true);
2087 UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
2088 vp, bp, vp->v_numoutput, 0);
2089 nestiobuf_setup(mbp, bp, offset - pg->offset, iobytes);
2090 /*
2091 * LFS doesn't like async I/O here, dies with
2092 * an assert in lfs_bwrite(). Is that assert
2093 			 * valid? I retained non-async behaviour when I
2094 * converted this to use nestiobuf --pooka
2095 */
2096 bp->b_flags &= ~B_ASYNC;
2097 }
2098
2099 /* XXX This is silly ... is this necessary? */
2100 mutex_enter(&bufcache_lock);
2101 mutex_enter(vp->v_interlock);
2102 bgetvp(vp, bp);
2103 mutex_exit(vp->v_interlock);
2104 mutex_exit(&bufcache_lock);
2105
2106 bp->b_lblkno = lfs_lblkno(fs, offset);
2107 bp->b_private = mbp;
2108 if (devvp->v_type == VBLK) {
2109 bp->b_dev = devvp->v_rdev;
2110 }
2111 VOP_BWRITE(bp->b_vp, bp);
2112 while (lfs_gatherblock(sp, bp, NULL))
2113 continue;
2114 }
2115
2116 nestiobuf_done(mbp, skipbytes, error);
2117 if (skipbytes) {
2118 UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
2119 }
2120 UVMHIST_LOG(ubchist, "returning 0", 0,0,0,0);
2121
2122 if (!async) {
2123 /* Start a segment write. */
2124 UVMHIST_LOG(ubchist, "flushing", 0,0,0,0);
2125 mutex_enter(&lfs_lock);
2126 lfs_flush(fs, 0, 1);
2127 mutex_exit(&lfs_lock);
2128 }
2129
2130 if ((sp->seg_flags & SEGM_SINGLE) && lfs_sb_getcurseg(fs) != fs->lfs_startseg)
2131 return EAGAIN;
2132
2133 return (0);
2134
2135 tryagain:
2136 /*
2137 * We can't write the pages, for whatever reason.
2138 * Clean up after ourselves, and make the caller try again.
2139 */
2140 mutex_enter(vp->v_interlock);
2141
2142 /* Tell why we're here, if we know */
2143 if (failreason != NULL) {
2144 DLOG((DLOG_PAGE, "lfs_gop_write: %s\n", failreason));
2145 }
2146 if (haveeof && startoffset >= eof) {
2147 DLOG((DLOG_PAGE, "lfs_gop_write: ino %d start 0x%" PRIx64
2148 " eof 0x%" PRIx64 " npages=%d\n", VTOI(vp)->i_number,
2149 pgs[0]->offset, eof, npages));
2150 }
2151
2152 mutex_enter(&uvm_pageqlock);
2153 for (i = 0; i < npages; i++) {
2154 pg = pgs[i];
2155
2156 if (pg->flags & PG_PAGEOUT)
2157 uvm_pageout_done(1);
2158 if (pg->flags & PG_DELWRI) {
2159 uvm_pageunwire(pg);
2160 }
2161 uvm_pageactivate(pg);
2162 pg->flags &= ~(PG_CLEAN|PG_DELWRI|PG_PAGEOUT|PG_RELEASED);
2163 DLOG((DLOG_PAGE, "pg[%d] = %p (vp %p off %" PRIx64 ")\n", i, pg,
2164 vp, pg->offset));
2165 DLOG((DLOG_PAGE, "pg[%d]->flags = %x\n", i, pg->flags));
2166 DLOG((DLOG_PAGE, "pg[%d]->pqflags = %x\n", i, pg->pqflags));
2167 DLOG((DLOG_PAGE, "pg[%d]->uanon = %p\n", i, pg->uanon));
2168 DLOG((DLOG_PAGE, "pg[%d]->uobject = %p\n", i, pg->uobject));
2169 DLOG((DLOG_PAGE, "pg[%d]->wire_count = %d\n", i,
2170 pg->wire_count));
2171 DLOG((DLOG_PAGE, "pg[%d]->loan_count = %d\n", i,
2172 pg->loan_count));
2173 }
2174 	/* uvm_page_unbusy takes care of PG_BUSY, PG_WANTED */
2175 uvm_page_unbusy(pgs, npages);
2176 mutex_exit(&uvm_pageqlock);
2177 mutex_exit(vp->v_interlock);
2178 return EAGAIN;
2179 }
2180
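/*
 * Illustrative sketch (hypothetical, not kernel code): the EAGAIN
 * retry contract described at the top of lfs_gop_write(), seen from
 * the caller's side.
 */
#if 0
static int
example_write_pages(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	int error;

	do {
		error = lfs_gop_write(vp, pgs, npages, flags);
		if (error == EAGAIN) {
			/* unlock so the cleaner can run, then relock */
		}
	} while (error == EAGAIN);
	return error;
}
#endif
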
2181 /*
2182 * finish vnode/inode initialization.
2183 * used by lfs_vget.
2184 */
2185 void
2186 lfs_vinit(struct mount *mp, struct vnode **vpp)
2187 {
2188 struct vnode *vp = *vpp;
2189 struct inode *ip = VTOI(vp);
2190 struct ulfsmount *ump = VFSTOULFS(mp);
2191 struct lfs *fs = ump->um_lfs;
2192 int i;
2193
2194 ip->i_mode = ip->i_ffs1_mode;
2195 ip->i_nlink = ip->i_ffs1_nlink;
2196 ip->i_lfs_osize = ip->i_size = ip->i_ffs1_size;
2197 ip->i_flags = ip->i_ffs1_flags;
2198 ip->i_gen = ip->i_ffs1_gen;
2199 ip->i_uid = ip->i_ffs1_uid;
2200 ip->i_gid = ip->i_ffs1_gid;
2201
2202 ip->i_lfs_effnblks = ip->i_ffs1_blocks;
2203 ip->i_lfs_odnlink = ip->i_ffs1_nlink;
2204
2205 /*
2206 * Initialize the vnode from the inode, check for aliases. In all
2207 	 * cases re-init ip; the underlying vnode/inode may have changed.
2208 */
2209 ulfs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
2210 ip = VTOI(vp);
2211
2212 memset(ip->i_lfs_fragsize, 0, ULFS_NDADDR * sizeof(*ip->i_lfs_fragsize));
2213 if (vp->v_type != VLNK || ip->i_size >= ip->i_lfs->um_maxsymlinklen) {
2214 #ifdef DEBUG
2215 for (i = (ip->i_size + lfs_sb_getbsize(fs) - 1) >> lfs_sb_getbshift(fs);
2216 i < ULFS_NDADDR; i++) {
2217 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
2218 i == 0)
2219 continue;
2220 if (ip->i_ffs1_db[i] != 0) {
2221 lfs_dump_dinode(ip->i_din.ffs1_din);
2222 panic("inconsistent inode (direct)");
2223 }
2224 }
2225 for ( ; i < ULFS_NDADDR + ULFS_NIADDR; i++) {
2226 if (ip->i_ffs1_ib[i - ULFS_NDADDR] != 0) {
2227 lfs_dump_dinode(ip->i_din.ffs1_din);
2228 panic("inconsistent inode (indirect)");
2229 }
2230 }
2231 #endif /* DEBUG */
2232 for (i = 0; i < ULFS_NDADDR; i++)
2233 if (ip->i_ffs1_db[i] != 0)
2234 ip->i_lfs_fragsize[i] = lfs_blksize(fs, ip, i);
2235 }
2236
2237 #ifdef DIAGNOSTIC
2238 if (vp->v_type == VNON) {
2239 # ifdef DEBUG
2240 lfs_dump_dinode(ip->i_din.ffs1_din);
2241 # endif
2242 panic("lfs_vinit: ino %llu is type VNON! (ifmt=%o)\n",
2243 (unsigned long long)ip->i_number,
2244 (ip->i_mode & LFS_IFMT) >> 12);
2245 }
2246 #endif /* DIAGNOSTIC */
2247
2248 /*
2249 * Finish inode initialization now that aliasing has been resolved.
2250 */
2251
2252 ip->i_devvp = ump->um_devvp;
2253 vref(ip->i_devvp);
2254 #if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
2255 ulfsquota_init(ip);
2256 #endif
2257 genfs_node_init(vp, &lfs_genfsops);
2258 uvm_vnp_setsize(vp, ip->i_size);
2259
2260 /* Initialize hiblk from file size */
2261 ip->i_lfs_hiblk = lfs_lblkno(ip->i_lfs, ip->i_size + lfs_sb_getbsize(ip->i_lfs) - 1) - 1;
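	/*
	 * E.g. (hypothetical numbers): with 8KB blocks and a 20KB file,
	 * lfs_lblkno(fs, 20KB + 8KB - 1) == 3, so hiblk == 2, the last
	 * block that actually contains file data.
	 */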
2262
2263 *vpp = vp;
2264 }
2265
2266 /*
2267 * Resize the filesystem to contain the specified number of segments.
2268 */
2269 int
2270 lfs_resize_fs(struct lfs *fs, int newnsegs)
2271 {
2272 SEGUSE *sup;
2273 CLEANERINFO *cip;
2274 struct buf *bp, *obp;
2275 daddr_t olast, nlast, ilast, noff, start, end;
2276 struct vnode *ivp;
2277 struct inode *ip;
2278 int error, badnews, inc, oldnsegs;
2279 int sbbytes, csbbytes, gain, cgain;
2280 int i;
2281
2282 /* Only support v2 and up */
2283 if (lfs_sb_getversion(fs) < 2)
2284 return EOPNOTSUPP;
2285
2286 /* If we're doing nothing, do it fast */
2287 oldnsegs = lfs_sb_getnseg(fs);
2288 if (newnsegs == oldnsegs)
2289 return 0;
2290
2291 /* We always have to have two superblocks */
2292 if (newnsegs <= lfs_dtosn(fs, lfs_sb_getsboff(fs, 1)))
2293 		/* XXX this error code is rather nonsensical */
2294 return EFBIG;
2295
2296 ivp = fs->lfs_ivnode;
2297 ip = VTOI(ivp);
2298 error = 0;
2299
2300 /* Take the segment lock so no one else calls lfs_newseg() */
2301 lfs_seglock(fs, SEGM_PROT);
2302
2303 /*
2304 * Make sure the segments we're going to be losing, if any,
2305 * are in fact empty. We hold the seglock, so their status
2306 * cannot change underneath us. Count the superblocks we lose,
2307 * while we're at it.
2308 */
2309 sbbytes = csbbytes = 0;
2310 cgain = 0;
2311 for (i = newnsegs; i < oldnsegs; i++) {
2312 LFS_SEGENTRY(sup, fs, i, bp);
2313 badnews = sup->su_nbytes || !(sup->su_flags & SEGUSE_INVAL);
2314 if (sup->su_flags & SEGUSE_SUPERBLOCK)
2315 sbbytes += LFS_SBPAD;
2316 if (!(sup->su_flags & SEGUSE_DIRTY)) {
2317 ++cgain;
2318 if (sup->su_flags & SEGUSE_SUPERBLOCK)
2319 csbbytes += LFS_SBPAD;
2320 }
2321 brelse(bp, 0);
2322 if (badnews) {
2323 error = EBUSY;
2324 goto out;
2325 }
2326 }
2327
2328 /* Note old and new segment table endpoints, and old ifile size */
2329 olast = lfs_sb_getcleansz(fs) + lfs_sb_getsegtabsz(fs);
2330 nlast = howmany(newnsegs, lfs_sb_getsepb(fs)) + lfs_sb_getcleansz(fs);
2331 ilast = ivp->v_size >> lfs_sb_getbshift(fs);
2332 noff = nlast - olast;
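	/*
	 * Hypothetical example: with 512 SEGUSE entries per block
	 * (sepb) and cleansz == 1, growing from 1000 to 2000 segments
	 * moves the table end from olast == 1 + 2 == 3 to
	 * nlast == 1 + 4 == 5, so noff == 2 new Ifile blocks.
	 */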
2333
2334 /*
2335 * Make sure no one can use the Ifile while we change it around.
2336 	 * Even after taking the iflock we need to make sure no one is
2337 	 * still holding Ifile buffers, so we read each one to drain them.
2338 * (XXX this could be done better.)
2339 */
2340 rw_enter(&fs->lfs_iflock, RW_WRITER);
2341 for (i = 0; i < ilast; i++) {
2342 /* XXX what to do if bread fails? */
2343 bread(ivp, i, lfs_sb_getbsize(fs), 0, &bp);
2344 brelse(bp, 0);
2345 }
2346
2347 /* Allocate new Ifile blocks */
2348 for (i = ilast; i < ilast + noff; i++) {
2349 if (lfs_balloc(ivp, i * lfs_sb_getbsize(fs), lfs_sb_getbsize(fs), NOCRED, 0,
2350 &bp) != 0)
2351 panic("balloc extending ifile");
2352 memset(bp->b_data, 0, lfs_sb_getbsize(fs));
2353 VOP_BWRITE(bp->b_vp, bp);
2354 }
2355
2356 /* Register new ifile size */
2357 ip->i_size += noff * lfs_sb_getbsize(fs);
2358 ip->i_ffs1_size = ip->i_size;
2359 uvm_vnp_setsize(ivp, ip->i_size);
2360
2361 /* Copy the inode table to its new position */
2362 if (noff != 0) {
2363 if (noff < 0) {
2364 start = nlast;
2365 end = ilast + noff;
2366 inc = 1;
2367 } else {
2368 start = ilast + noff - 1;
2369 end = nlast - 1;
2370 inc = -1;
2371 }
2372 for (i = start; i != end; i += inc) {
2373 if (bread(ivp, i, lfs_sb_getbsize(fs),
2374 B_MODIFY, &bp) != 0)
2375 panic("resize: bread dst blk failed");
2376 if (bread(ivp, i - noff, lfs_sb_getbsize(fs),
2377 0, &obp))
2378 panic("resize: bread src blk failed");
2379 memcpy(bp->b_data, obp->b_data, lfs_sb_getbsize(fs));
2380 VOP_BWRITE(bp->b_vp, bp);
2381 brelse(obp, 0);
2382 }
2383 }
2384
2385 /* If we are expanding, write the new empty SEGUSE entries */
2386 if (newnsegs > oldnsegs) {
2387 for (i = oldnsegs; i < newnsegs; i++) {
2388 if ((error = bread(ivp, i / lfs_sb_getsepb(fs) +
2389 lfs_sb_getcleansz(fs), lfs_sb_getbsize(fs),
2390 B_MODIFY, &bp)) != 0)
2391 panic("lfs: ifile read: %d", error);
2392 while ((i + 1) % lfs_sb_getsepb(fs) && i < newnsegs) {
2393 sup = &((SEGUSE *)bp->b_data)[i % lfs_sb_getsepb(fs)];
2394 memset(sup, 0, sizeof(*sup));
2395 i++;
2396 }
2397 VOP_BWRITE(bp->b_vp, bp);
2398 }
2399 }
2400
2401 /* Zero out unused superblock offsets */
2402 for (i = 2; i < LFS_MAXNUMSB; i++)
2403 if (lfs_dtosn(fs, lfs_sb_getsboff(fs, i)) >= newnsegs)
2404 lfs_sb_setsboff(fs, i, 0x0);
2405
2406 /*
2407 * Correct superblock entries that depend on fs size.
2408 * The computations of these are as follows:
2409 *
2410 * size = lfs_segtod(fs, nseg)
2411 	 * dsize = lfs_segtod(fs, nseg - minfreeseg) - lfs_btofsb(fs, #super * LFS_SBPAD)
2412 	 * bfree = dsize - lfs_btofsb(fs, bsize * nseg / 2) - blocks_actually_used
2413 	 * avail = lfs_segtod(fs, nclean) - lfs_btofsb(fs, #clean_super * LFS_SBPAD)
2414 * + (lfs_segtod(fs, 1) - (offset - curseg))
2415 * - lfs_segtod(fs, minfreeseg - (minfreeseg / 2))
2416 *
2417 * XXX - we should probably adjust minfreeseg as well.
2418 */
2419 gain = (newnsegs - oldnsegs);
2420 lfs_sb_setnseg(fs, newnsegs);
2421 lfs_sb_setsegtabsz(fs, nlast - lfs_sb_getcleansz(fs));
2422 lfs_sb_addsize(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)));
2423 lfs_sb_adddsize(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)) - lfs_btofsb(fs, sbbytes));
2424 lfs_sb_addbfree(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)) - lfs_btofsb(fs, sbbytes)
2425 - gain * lfs_btofsb(fs, lfs_sb_getbsize(fs) / 2));
2426 if (gain > 0) {
2427 lfs_sb_addnclean(fs, gain);
2428 lfs_sb_addavail(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)));
2429 } else {
2430 lfs_sb_subnclean(fs, cgain);
2431 lfs_sb_subavail(fs, cgain * lfs_btofsb(fs, lfs_sb_getssize(fs)) -
2432 lfs_btofsb(fs, csbbytes));
2433 }
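	/*
	 * Sanity sketch with hypothetical numbers: growing by one 1MB
	 * segment containing no superblock gives sbbytes == 0, so size
	 * and dsize each gain lfs_btofsb(fs, 1MB), bfree gains that
	 * minus half a block of slack, and nclean/avail gain one full
	 * clean segment.
	 */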
2434
2435 /* Resize segment flag cache */
2436 fs->lfs_suflags[0] = realloc(fs->lfs_suflags[0],
2437 lfs_sb_getnseg(fs) * sizeof(u_int32_t), M_SEGMENT, M_WAITOK);
2438 fs->lfs_suflags[1] = realloc(fs->lfs_suflags[1],
2439 lfs_sb_getnseg(fs) * sizeof(u_int32_t), M_SEGMENT, M_WAITOK);
2440 for (i = oldnsegs; i < newnsegs; i++)
2441 fs->lfs_suflags[0][i] = fs->lfs_suflags[1][i] = 0x0;
2442
2443 /* Truncate Ifile if necessary */
2444 if (noff < 0)
2445 lfs_truncate(ivp, ivp->v_size + (noff << lfs_sb_getbshift(fs)), 0,
2446 NOCRED);
2447
2448 /* Update cleaner info so the cleaner can die */
2449 /* XXX what to do if bread fails? */
2450 bread(ivp, 0, lfs_sb_getbsize(fs), B_MODIFY, &bp);
2451 cip = bp->b_data;
2452 lfs_ci_setclean(fs, cip, lfs_sb_getnclean(fs));
2453 lfs_ci_setdirty(fs, cip, lfs_sb_getnseg(fs) - lfs_sb_getnclean(fs));
2454 VOP_BWRITE(bp->b_vp, bp);
2455
2456 /* Let Ifile accesses proceed */
2457 rw_exit(&fs->lfs_iflock);
2458
2459 out:
2460 lfs_segunlock(fs);
2461 return error;
2462 }
2463
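/*
 * Illustrative sketch (hypothetical): deriving a segment count for
 * lfs_resize_fs() from a target size in bytes.  A real caller would
 * also respect the version and superblock constraints checked at the
 * top of the function.
 */
#if 0
static int
example_resize(struct lfs *fs, uint64_t target_bytes)
{

	/* one segment is lfs_sb_getssize(fs) bytes */
	return lfs_resize_fs(fs, (int)(target_bytes / lfs_sb_getssize(fs)));
}
#endif
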
2464 /*
2465 * Extended attribute dispatch
2466 */
2467 int
2468 lfs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
2469 int attrnamespace, const char *attrname)
2470 {
2471 #ifdef LFS_EXTATTR
2472 struct ulfsmount *ump;
2473
2474 ump = VFSTOULFS(mp);
2475 if (ump->um_fstype == ULFS1) {
2476 return ulfs_extattrctl(mp, cmd, vp, attrnamespace, attrname);
2477 }
2478 #endif
2479 return vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname);
2480 }
2481