/*	$NetBSD: lfs_vfsops.c,v 1.341 2015/08/19 20:33:29 dholland Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_vfsops.c	8.20 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.341 2015/08/19 20:33:29 dholland Exp $");

#if defined(_KERNEL_OPT)
#include "opt_lfs.h"
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kthread.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/module.h>
#include <sys/syscallvar.h>
#include <sys/syscall.h>
#include <sys/syscallargs.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/lfs/ulfs_quotacommon.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_bswap.h>
#include <ufs/lfs/ulfs_extern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_stat.h>
#include <uvm/uvm_pager.h>
#include <uvm/uvm_pdaemon.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_kernel.h>
#include <ufs/lfs/lfs_extern.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>

MODULE(MODULE_CLASS_VFS, lfs, NULL);

static int lfs_gop_write(struct vnode *, struct vm_page **, int, int);
static int lfs_mountfs(struct vnode *, struct mount *, struct lwp *);

static struct sysctllog *lfs_sysctl_log;

extern const struct vnodeopv_desc lfs_vnodeop_opv_desc;
extern const struct vnodeopv_desc lfs_specop_opv_desc;
extern const struct vnodeopv_desc lfs_fifoop_opv_desc;

pid_t lfs_writer_daemon = 0;
lwpid_t lfs_writer_lid = 0;
int lfs_do_flush = 0;
#ifdef LFS_KERNEL_RFW
int lfs_do_rfw = 0;
#endif

const struct vnodeopv_desc * const lfs_vnodeopv_descs[] = {
	&lfs_vnodeop_opv_desc,
	&lfs_specop_opv_desc,
	&lfs_fifoop_opv_desc,
	NULL,
};

struct vfsops lfs_vfsops = {
	.vfs_name = MOUNT_LFS,
	.vfs_min_mount_data = sizeof (struct ulfs_args),
	.vfs_mount = lfs_mount,
	.vfs_start = ulfs_start,
	.vfs_unmount = lfs_unmount,
	.vfs_root = ulfs_root,
	.vfs_quotactl = ulfs_quotactl,
	.vfs_statvfs = lfs_statvfs,
	.vfs_sync = lfs_sync,
	.vfs_vget = lfs_vget,
	.vfs_loadvnode = lfs_loadvnode,
	.vfs_newvnode = lfs_newvnode,
	.vfs_fhtovp = lfs_fhtovp,
	.vfs_vptofh = lfs_vptofh,
	.vfs_init = lfs_init,
	.vfs_reinit = lfs_reinit,
	.vfs_done = lfs_done,
	.vfs_mountroot = lfs_mountroot,
	.vfs_snapshot = (void *)eopnotsupp,
	.vfs_extattrctl = lfs_extattrctl,
	.vfs_suspendctl = (void *)eopnotsupp,
	.vfs_renamelock_enter = genfs_renamelock_enter,
	.vfs_renamelock_exit = genfs_renamelock_exit,
	.vfs_fsync = (void *)eopnotsupp,
	.vfs_opv_descs = lfs_vnodeopv_descs
};

const struct genfs_ops lfs_genfsops = {
	.gop_size = lfs_gop_size,
	.gop_alloc = ulfs_gop_alloc,
	.gop_write = lfs_gop_write,
	.gop_markupdate = ulfs_gop_markupdate,
};
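
/*
 * Note that gop_write is lfs_gop_write, which replaces genfs_gop_write
 * so that page writes go through the segment machinery; see the long
 * comment above lfs_gop_write below for how the two differ.
 */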

struct shortlong {
	const char *sname;
	const char *lname;
};

static int
sysctl_lfs_dostats(SYSCTLFN_ARGS)
{
	extern struct lfs_stats lfs_stats;
	extern int lfs_dostats;
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

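	/* Writing a zero here also clears the accumulated counters. */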
	if (lfs_dostats == 0)
		memset(&lfs_stats, 0, sizeof(lfs_stats));

	return (0);
}

static void
lfs_sysctl_setup(struct sysctllog **clog)
{
	int i;
	extern int lfs_writeindir, lfs_dostats, lfs_clean_vnhead,
		lfs_fs_pagetrip, lfs_ignore_lazy_sync;
#ifdef DEBUG
	extern int lfs_debug_log_subsys[DLOG_MAX];
	struct shortlong dlog_names[DLOG_MAX] = {	/* Must match lfs.h ! */
		{ "rollforward",  "Debug roll-forward code" },
		{ "alloc",	  "Debug inode allocation and free list" },
		{ "avail",	  "Debug space-available-now accounting" },
		{ "flush",	  "Debug flush triggers" },
		{ "lockedlist",	  "Debug locked list accounting" },
		{ "vnode_verbose", "Verbose per-vnode-written debugging" },
		{ "vnode",	  "Debug vnode use during segment write" },
		{ "segment",	  "Debug segment writing" },
		{ "seguse",	  "Debug segment used-bytes accounting" },
		{ "cleaner",	  "Debug cleaning routines" },
		{ "mount",	  "Debug mount/unmount routines" },
		{ "pagecache",	  "Debug UBC interactions" },
		{ "dirop",	  "Debug directory-operation accounting" },
		{ "malloc",	  "Debug private malloc accounting" },
	};
#endif /* DEBUG */
	struct shortlong stat_names[] = {	/* Must match lfs.h! */
		{ "segsused",	    "Number of new segments allocated" },
		{ "psegwrites",	    "Number of partial-segment writes" },
		{ "psyncwrites",    "Number of synchronous partial-segment"
				    " writes" },
		{ "pcleanwrites",   "Number of partial-segment writes by the"
				    " cleaner" },
		{ "blocktot",	    "Number of blocks written" },
		{ "cleanblocks",    "Number of blocks written by the cleaner" },
		{ "ncheckpoints",   "Number of checkpoints made" },
		{ "nwrites",	    "Number of whole writes" },
		{ "nsync_writes",   "Number of synchronous writes" },
		{ "wait_exceeded",  "Number of times writer waited for"
				    " cleaner" },
		{ "write_exceeded", "Number of times writer invoked flush" },
		{ "flush_invoked",  "Number of times flush was invoked" },
243 { "vflush_invoked", "Number of time vflush was called" },
244 { "clean_inlocked", "Number of vnodes skipped for being dead" },
245 { "clean_vnlocked", "Number of vnodes skipped for vget failure" },
246 { "segs_reclaimed", "Number of segments reclaimed" },
247 };

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "lfs",
		       SYSCTL_DESCR("Log-structured file system"),
		       NULL, 0, NULL, 0,
		       CTL_VFS, 5, CTL_EOL);
	/*
	 * XXX the "5" above could be dynamic, thereby eliminating one
	 * more instance of the "number to vfs" mapping problem, but
	 * "5" is the order as taken from sys/mount.h
	 */

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "flushindir", NULL,
		       NULL, 0, &lfs_writeindir, 0,
		       CTL_VFS, 5, LFS_WRITEINDIR, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "clean_vnhead", NULL,
		       NULL, 0, &lfs_clean_vnhead, 0,
		       CTL_VFS, 5, LFS_CLEAN_VNHEAD, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "dostats",
		       SYSCTL_DESCR("Maintain statistics on LFS operations"),
		       sysctl_lfs_dostats, 0, &lfs_dostats, 0,
		       CTL_VFS, 5, LFS_DOSTATS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "pagetrip",
		       SYSCTL_DESCR("How many dirty pages in fs triggers"
				    " a flush"),
		       NULL, 0, &lfs_fs_pagetrip, 0,
		       CTL_VFS, 5, LFS_FS_PAGETRIP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "ignore_lazy_sync",
		       SYSCTL_DESCR("Lazy Sync is ignored entirely"),
		       NULL, 0, &lfs_ignore_lazy_sync, 0,
		       CTL_VFS, 5, LFS_IGNORE_LAZY_SYNC, CTL_EOL);
#ifdef LFS_KERNEL_RFW
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "rfw",
		       SYSCTL_DESCR("Use in-kernel roll-forward on mount"),
		       NULL, 0, &lfs_do_rfw, 0,
		       CTL_VFS, 5, LFS_DO_RFW, CTL_EOL);
#endif

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "stats",
		       SYSCTL_DESCR("Statistics"),
		       NULL, 0, NULL, 0,
		       CTL_VFS, 5, LFS_STATS, CTL_EOL);
	for (i = 0; i < sizeof(struct lfs_stats) / sizeof(u_int); i++) {
		sysctl_createv(clog, 0, NULL, NULL,
			       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
			       CTLTYPE_INT, stat_names[i].sname,
			       SYSCTL_DESCR(stat_names[i].lname),
			       NULL, 0, &(((u_int *)&lfs_stats.segsused)[i]),
			       0, CTL_VFS, 5, LFS_STATS, i, CTL_EOL);
	}

#ifdef DEBUG
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "debug",
		       SYSCTL_DESCR("Debugging options"),
		       NULL, 0, NULL, 0,
		       CTL_VFS, 5, LFS_DEBUGLOG, CTL_EOL);
	for (i = 0; i < DLOG_MAX; i++) {
		sysctl_createv(clog, 0, NULL, NULL,
			       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			       CTLTYPE_INT, dlog_names[i].sname,
			       SYSCTL_DESCR(dlog_names[i].lname),
			       NULL, 0, &(lfs_debug_log_subsys[i]), 0,
			       CTL_VFS, 5, LFS_DEBUGLOG, i, CTL_EOL);
	}
#endif
}
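
/*
 * For reference, the knobs created above appear under the "vfs.lfs"
 * sysctl node, e.g. "sysctl -w vfs.lfs.dostats=1" to enable statistics
 * and "sysctl vfs.lfs.stats" to read them back.
 */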

/* old cleaner syscall interface.  see VOP_FCNTL() */
static const struct syscall_package lfs_syscalls[] = {
	{ SYS_lfs_bmapv,	0, (sy_call_t *)sys_lfs_bmapv },
	{ SYS_lfs_markv,	0, (sy_call_t *)sys_lfs_markv },
	{ SYS___lfs_segwait50,	0, (sy_call_t *)sys___lfs_segwait50 },
	{ SYS_lfs_segclean,	0, (sy_call_t *)sys_lfs_segclean },
	{ 0, 0, NULL },
};

static int
lfs_modcmd(modcmd_t cmd, void *arg)
{
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		error = syscall_establish(NULL, lfs_syscalls);
		if (error)
			return error;
		error = vfs_attach(&lfs_vfsops);
		if (error != 0) {
			syscall_disestablish(NULL, lfs_syscalls);
			break;
		}
		lfs_sysctl_setup(&lfs_sysctl_log);
		break;
	case MODULE_CMD_FINI:
		error = vfs_detach(&lfs_vfsops);
		if (error != 0)
			break;
		syscall_disestablish(NULL, lfs_syscalls);
		sysctl_teardown(&lfs_sysctl_log);
		break;
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

/*
 * XXX Same structure as FFS inodes?  Should we share a common pool?
 */
struct pool lfs_inode_pool;
struct pool lfs_dinode_pool;
struct pool lfs_inoext_pool;
struct pool lfs_lbnentry_pool;

/*
 * The writer daemon.  UVM keeps track of how many dirty pages we are holding
 * in lfs_subsys_pages; the daemon flushes the filesystem when this value
 * crosses the (user-defined) threshold LFS_MAX_PAGES.
 */
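/*
 * A flush is also forced when the count of locked-down buffers exceeds
 * LFS_MAX_BUFS, or their aggregate size exceeds LFS_MAX_BYTES; see the
 * checks at the top of the main loop below.
 */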
static void
lfs_writerd(void *arg)
{
	struct mount *mp, *nmp;
	struct lfs *fs;
	struct vfsops *vfs = NULL;
	int fsflags;
	int skipc;
	int lfsc;
	int wrote_something = 0;

	mutex_enter(&lfs_lock);
	lfs_writer_daemon = curproc->p_pid;
	lfs_writer_lid = curlwp->l_lid;
	mutex_exit(&lfs_lock);

	/* Take an extra reference to the LFS vfsops. */
	vfs = vfs_getopsbyname(MOUNT_LFS);

	mutex_enter(&lfs_lock);
	for (;;) {
		KASSERT(mutex_owned(&lfs_lock));
		if (wrote_something == 0)
			mtsleep(&lfs_writer_daemon, PVM, "lfswriter", hz/10 + 1,
			    &lfs_lock);

		KASSERT(mutex_owned(&lfs_lock));
		wrote_something = 0;

		/*
		 * If global state wants a flush, flush everything.
		 */
		if (lfs_do_flush || locked_queue_count > LFS_MAX_BUFS ||
		    locked_queue_bytes > LFS_MAX_BYTES ||
		    lfs_subsys_pages > LFS_MAX_PAGES) {

			if (lfs_do_flush) {
				DLOG((DLOG_FLUSH, "lfs_writerd: lfs_do_flush\n"));
			}
			if (locked_queue_count > LFS_MAX_BUFS) {
				DLOG((DLOG_FLUSH, "lfs_writerd: lqc = %d, max %d\n",
				      locked_queue_count, LFS_MAX_BUFS));
			}
			if (locked_queue_bytes > LFS_MAX_BYTES) {
				DLOG((DLOG_FLUSH, "lfs_writerd: lqb = %ld, max %ld\n",
				      locked_queue_bytes, LFS_MAX_BYTES));
			}
			if (lfs_subsys_pages > LFS_MAX_PAGES) {
				DLOG((DLOG_FLUSH, "lfs_writerd: lssp = %d, max %d\n",
				      lfs_subsys_pages, LFS_MAX_PAGES));
			}

			lfs_flush(NULL, SEGM_WRITERD, 0);
			lfs_do_flush = 0;
			KASSERT(mutex_owned(&lfs_lock));
			continue;
		}
		KASSERT(mutex_owned(&lfs_lock));
		mutex_exit(&lfs_lock);

		/*
		 * Look through the list of LFSs to see if any of them
		 * have requested pageouts.
		 */
		mutex_enter(&mountlist_lock);
		lfsc = 0;
		skipc = 0;
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, &nmp)) {
				++skipc;
				continue;
			}
			KASSERT(!mutex_owned(&lfs_lock));
			if (strncmp(mp->mnt_stat.f_fstypename, MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				++lfsc;
				fs = VFSTOULFS(mp)->um_lfs;
				daddr_t ooffset = 0;
				fsflags = SEGM_SINGLE;

				mutex_enter(&lfs_lock);
				ooffset = lfs_sb_getoffset(fs);

				if (lfs_sb_getnextseg(fs) < lfs_sb_getcurseg(fs) && fs->lfs_nowrap) {
					/* Don't try to write if we're suspended */
					mutex_exit(&lfs_lock);
					vfs_unbusy(mp, false, &nmp);
					continue;
				}
				if (LFS_STARVED_FOR_SEGS(fs)) {
					mutex_exit(&lfs_lock);

					DLOG((DLOG_FLUSH, "lfs_writerd: need cleaning before writing possible\n"));
					lfs_wakeup_cleaner(fs);
					vfs_unbusy(mp, false, &nmp);
					continue;
				}

				if ((fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
				     lfs_dirvcount > LFS_MAX_DIROP) &&
				    fs->lfs_dirops == 0) {
					fsflags &= ~SEGM_SINGLE;
					fsflags |= SEGM_CKP;
					DLOG((DLOG_FLUSH, "lfs_writerd: checkpoint\n"));
					lfs_flush_fs(fs, fsflags);
				} else if (fs->lfs_pdflush) {
					DLOG((DLOG_FLUSH, "lfs_writerd: pdflush set\n"));
					lfs_flush_fs(fs, fsflags);
				} else if (!TAILQ_EMPTY(&fs->lfs_pchainhd)) {
					DLOG((DLOG_FLUSH, "lfs_writerd: pchain non-empty\n"));
					mutex_exit(&lfs_lock);
					lfs_writer_enter(fs, "wrdirop");
					lfs_flush_pchain(fs);
					lfs_writer_leave(fs);
					mutex_enter(&lfs_lock);
				}
				if (lfs_sb_getoffset(fs) != ooffset)
					++wrote_something;
				mutex_exit(&lfs_lock);
			}
			KASSERT(!mutex_owned(&lfs_lock));
			vfs_unbusy(mp, false, &nmp);
		}
		if (lfsc + skipc == 0) {
			mutex_enter(&lfs_lock);
			lfs_writer_daemon = 0;
			lfs_writer_lid = 0;
			mutex_exit(&lfs_lock);
			mutex_exit(&mountlist_lock);
			break;
		}
		mutex_exit(&mountlist_lock);

		mutex_enter(&lfs_lock);
	}
	KASSERT(!mutex_owned(&lfs_lock));
	KASSERT(!mutex_owned(&mountlist_lock));

	/* Give up our extra reference so the module can be unloaded. */
	mutex_enter(&vfs_list_lock);
	if (vfs != NULL)
		vfs->vfs_refcount--;
	mutex_exit(&vfs_list_lock);

	/* Done! */
	kthread_exit(0);
}

/*
 * Initialize the filesystem, most work done by ulfs_init.
 */
void
lfs_init(void)
{

	/*
	 * XXX: should we use separate pools for 32-bit and 64-bit
	 * dinodes?
	 */
	malloc_type_attach(M_SEGMENT);
	pool_init(&lfs_inode_pool, sizeof(struct inode), 0, 0, 0,
	    "lfsinopl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&lfs_dinode_pool, sizeof(union lfs_dinode), 0, 0, 0,
	    "lfsdinopl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&lfs_inoext_pool, sizeof(struct lfs_inode_ext), 8, 0, 0,
	    "lfsinoextpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&lfs_lbnentry_pool, sizeof(struct lbnentry), 0, 0, 0,
	    "lfslbnpool", &pool_allocator_nointr, IPL_NONE);
	ulfs_init();

#ifdef DEBUG
	memset(lfs_log, 0, sizeof(lfs_log));
#endif
	mutex_init(&lfs_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&locked_queue_cv, "lfsbuf");
	cv_init(&lfs_writing_cv, "lfsflush");
}

void
lfs_reinit(void)
{
	ulfs_reinit();
}

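/*
 * Release everything lfs_init() set up, in roughly reverse order.
 */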
void
lfs_done(void)
{
	ulfs_done();
	mutex_destroy(&lfs_lock);
	cv_destroy(&locked_queue_cv);
	cv_destroy(&lfs_writing_cv);
	pool_destroy(&lfs_inode_pool);
	pool_destroy(&lfs_dinode_pool);
	pool_destroy(&lfs_inoext_pool);
	pool_destroy(&lfs_lbnentry_pool);
	malloc_type_detach(M_SEGMENT);
}

/*
 * Called by main() when ulfs is going to be mounted as root.
 */
int
lfs_mountroot(void)
{
	extern struct vnode *rootvp;
	struct lfs *fs = NULL;				/* LFS */
	struct mount *mp;
	struct lwp *l = curlwp;
	struct ulfsmount *ump;
	int error;

	if (device_class(root_device) != DV_DISK)
		return (ENODEV);

	if (rootdev == NODEV)
		return (ENODEV);
	if ((error = vfs_rootmountalloc(MOUNT_LFS, "root_device", &mp))) {
		vrele(rootvp);
		return (error);
	}
	if ((error = lfs_mountfs(rootvp, mp, l))) {
		vfs_unbusy(mp, false, NULL);
		vfs_destroy(mp);
		return (error);
	}
	mountlist_append(mp);
	ump = VFSTOULFS(mp);
	fs = ump->um_lfs;
	lfs_sb_setfsmnt(fs, mp->mnt_stat.f_mntonname);
	(void)lfs_statvfs(mp, &mp->mnt_stat);
	vfs_unbusy(mp, false, NULL);
	setrootfstime((time_t)lfs_sb_gettstamp(VFSTOULFS(mp)->um_lfs));
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
int
lfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;
	struct ulfs_args *args = data;
	struct ulfsmount *ump = NULL;
	struct lfs *fs = NULL;				/* LFS */
	int error = 0, update;
	mode_t accessmode;

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		ump = VFSTOULFS(mp);
		if (ump == NULL)
			return EIO;
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	update = mp->mnt_flag & MNT_UPDATE;

	/* Check arguments */
	if (args->fspec != NULL) {
		/*
		 * Look up the name and verify that it's sane.
		 */
		error = namei_simple_user(args->fspec,
					NSM_FOLLOW_NOEMULROOT, &devvp);
		if (error != 0)
			return (error);

		if (!update) {
			/*
			 * Be sure this is a valid block device
			 */
			if (devvp->v_type != VBLK)
				error = ENOTBLK;
			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
				error = ENXIO;
		} else {
			/*
			 * Be sure we're still naming the same device
			 * used for our initial mount
			 */
			ump = VFSTOULFS(mp);
			if (devvp != ump->um_devvp) {
				if (devvp->v_rdev != ump->um_devvp->v_rdev)
					error = EINVAL;
				else {
					vrele(devvp);
					devvp = ump->um_devvp;
					vref(devvp);
				}
			}
		}
	} else {
		if (!update) {
			/* New mounts must have a filename for the device */
			return (EINVAL);
		} else {
			/* Use the extant mount */
			ump = VFSTOULFS(mp);
			devvp = ump->um_devvp;
			vref(devvp);
		}
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (error == 0) {
		accessmode = VREAD;
		if (update ?
		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
		    (mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
		    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
		    KAUTH_ARG(accessmode));
		VOP_UNLOCK(devvp);
	}

	if (error) {
		vrele(devvp);
		return (error);
	}

	if (!update) {
		int flags;

		if (mp->mnt_flag & MNT_RDONLY)
			flags = FREAD;
		else
			flags = FREAD|FWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(devvp, flags, FSCRED);
		VOP_UNLOCK(devvp);
		if (error)
			goto fail;
		error = lfs_mountfs(devvp, mp, l);		/* LFS */
		if (error) {
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			(void)VOP_CLOSE(devvp, flags, NOCRED);
			VOP_UNLOCK(devvp);
			goto fail;
		}

		ump = VFSTOULFS(mp);
		fs = ump->um_lfs;
	} else {
		/*
		 * Update the mount.
		 */

		/*
		 * The initial mount got a reference on this
		 * device, so drop the one obtained via
		 * namei(), above.
		 */
		vrele(devvp);

		ump = VFSTOULFS(mp);
		fs = ump->um_lfs;

		if (fs->lfs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/*
			 * Changing from read/write to read-only.
			 * XXX: shouldn't we sync here? or does vfs do that?
			 */
#ifdef LFS_QUOTA2
			/* XXX: quotas should remain on when readonly */
			if (fs->lfs_use_quota2) {
				error = lfsquota2_umount(mp, 0);
				if (error) {
					return error;
				}
			}
#endif
		}

		if (fs->lfs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
			/*
			 * Changing from read-only to read/write.
			 * Note in the superblocks that we're writing.
			 */

			/* XXX: quotas should have been on even if readonly */
			if (fs->lfs_use_quota2) {
#ifdef LFS_QUOTA2
				error = lfs_quota2_mount(mp);
#else
				uprintf("%s: no kernel support for this "
					"filesystem's quotas\n",
					mp->mnt_stat.f_mntonname);
				if (mp->mnt_flag & MNT_FORCE) {
					uprintf("%s: mounting anyway; "
						"fsck afterwards\n",
						mp->mnt_stat.f_mntonname);
				} else {
					error = EINVAL;
				}
#endif
				if (error) {
					return error;
				}
			}

			fs->lfs_ronly = 0;
			if (lfs_sb_getpflags(fs) & LFS_PF_CLEAN) {
				lfs_sb_setpflags(fs, lfs_sb_getpflags(fs) & ~LFS_PF_CLEAN);
				lfs_writesuper(fs, lfs_sb_getsboff(fs, 0));
				lfs_writesuper(fs, lfs_sb_getsboff(fs, 1));
			}
		}
		if (args->fspec == NULL)
			return EINVAL;
	}

	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
	if (error == 0)
		lfs_sb_setfsmnt(fs, mp->mnt_stat.f_mntonname);
	return error;

fail:
	vrele(devvp);
	return (error);
}

/*
 * Common code for mount and mountroot
 * LFS specific
 */
int
lfs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
{
	struct dlfs *tdfs, *dfs, *adfs;
	struct lfs *fs;
	struct ulfsmount *ump;
	struct vnode *vp;
	struct buf *bp, *abp;
	dev_t dev;
	int error, i, ronly, fsbsize;
	kauth_cred_t cred;
	CLEANERINFO *cip;
	SEGUSE *sup;
	daddr_t sb_addr;

	cred = l ? l->l_cred : NOCRED;

	/*
	 * Flush out any old buffers remaining from a previous use.
	 */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	/* Don't free random space on error. */
	bp = NULL;
	abp = NULL;
	ump = NULL;

	sb_addr = LFS_LABELPAD / DEV_BSIZE;
	while (1) {
		/* Read in the superblock. */
		error = bread(devvp, sb_addr, LFS_SBPAD, 0, &bp);
		if (error)
			goto out;
		dfs = (struct dlfs *)bp->b_data;

		/* Check the basics. */
		if (dfs->dlfs_magic != LFS_MAGIC || dfs->dlfs_bsize > MAXBSIZE ||
		    dfs->dlfs_version > LFS_VERSION ||
		    dfs->dlfs_bsize < sizeof(struct dlfs)) {
			DLOG((DLOG_MOUNT, "lfs_mountfs: primary superblock sanity failed\n"));
			error = EINVAL;		/* XXX needs translation */
			goto out;
		}
		if (dfs->dlfs_inodefmt > LFS_MAXINODEFMT) {
			DLOG((DLOG_MOUNT, "lfs_mountfs: unknown inode format %d\n",
			      dfs->dlfs_inodefmt));
			error = EINVAL;
			goto out;
		}

		if (dfs->dlfs_version == 1)
			fsbsize = DEV_BSIZE;
		else {
			fsbsize = 1 << dfs->dlfs_ffshift;
			/*
			 * Could be, if the frag size is large enough, that we
			 * don't have the "real" primary superblock.  If that's
			 * the case, get the real one, and try again.
			 */
			if (sb_addr != (dfs->dlfs_sboffs[0] << (dfs->dlfs_ffshift - DEV_BSHIFT))) {
				DLOG((DLOG_MOUNT, "lfs_mountfs: sb daddr"
				      " 0x%llx is not right, trying 0x%llx\n",
				      (long long)sb_addr,
				      (long long)(dfs->dlfs_sboffs[0] << (dfs->dlfs_ffshift - DEV_BSHIFT))));
				sb_addr = dfs->dlfs_sboffs[0] << (dfs->dlfs_ffshift - DEV_BSHIFT);
				brelse(bp, 0);
				continue;
			}
		}
		break;
	}

	/*
	 * Check the second superblock to see which is newer; then mount
	 * using the older of the two.  This is necessary to ensure that
	 * the filesystem is valid if it was not unmounted cleanly.
	 */
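	/*
	 * (The newer superblock may describe a checkpoint that was still
	 * in flight when the system went down; the older one is known to
	 * be complete.)
	 */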

	if (dfs->dlfs_sboffs[1] &&
	    dfs->dlfs_sboffs[1] - LFS_LABELPAD / fsbsize > LFS_SBPAD / fsbsize)
	{
		error = bread(devvp, dfs->dlfs_sboffs[1] * (fsbsize / DEV_BSIZE),
			LFS_SBPAD, 0, &abp);
		if (error)
			goto out;
		adfs = (struct dlfs *)abp->b_data;

		if (dfs->dlfs_version == 1) {
			/* 1s resolution comparison */
			if (adfs->dlfs_tstamp < dfs->dlfs_tstamp)
				tdfs = adfs;
			else
				tdfs = dfs;
		} else {
			/* monotonic infinite-resolution comparison */
			if (adfs->dlfs_serial < dfs->dlfs_serial)
				tdfs = adfs;
			else
				tdfs = dfs;
		}

		/* Check the basics. */
		if (tdfs->dlfs_magic != LFS_MAGIC ||
		    tdfs->dlfs_bsize > MAXBSIZE ||
		    tdfs->dlfs_version > LFS_VERSION ||
		    tdfs->dlfs_bsize < sizeof(struct dlfs)) {
			DLOG((DLOG_MOUNT, "lfs_mountfs: alt superblock"
			      " sanity failed\n"));
			error = EINVAL;		/* XXX needs translation */
			goto out;
		}
	} else {
		DLOG((DLOG_MOUNT, "lfs_mountfs: invalid alt superblock"
		      " daddr=0x%x\n", dfs->dlfs_sboffs[1]));
		error = EINVAL;
		goto out;
	}

	/* Allocate the mount structure, copy the superblock into it. */
	fs = kmem_zalloc(sizeof(struct lfs), KM_SLEEP);
	memcpy(&fs->lfs_dlfs_u.u_32, tdfs, sizeof(struct dlfs));
	fs->lfs_is64 = false;

	/* Compatibility */
	if (lfs_sb_getversion(fs) < 2) {
		lfs_sb_setsumsize(fs, LFS_V1_SUMMARY_SIZE);
		lfs_sb_setibsize(fs, lfs_sb_getbsize(fs));
		lfs_sb_sets0addr(fs, lfs_sb_getsboff(fs, 0));
		lfs_sb_settstamp(fs, lfs_sb_getotstamp(fs));
		lfs_sb_setfsbtodb(fs, 0);
	}
	if (lfs_sb_getresvseg(fs) == 0)
		lfs_sb_setresvseg(fs, MIN(lfs_sb_getminfreeseg(fs) - 1,
			MAX(MIN_RESV_SEGS, lfs_sb_getminfreeseg(fs) / 2 + 1)));
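	/*
	 * The expression above reserves roughly half of lfs_minfreeseg
	 * (but no fewer than MIN_RESV_SEGS segments), capped at one less
	 * than lfs_minfreeseg itself.
	 */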

	/*
	 * If we aren't going to be able to write meaningfully to this
	 * filesystem, and were not mounted readonly, bomb out now.
	 */
	if (lfs_fsbtob(fs, LFS_NRESERVE(fs)) > LFS_MAX_BYTES && !ronly) {
		DLOG((DLOG_MOUNT, "lfs_mount: to mount this filesystem read/write,"
		      " we need BUFPAGES >= %lld\n",
		      (long long)((bufmem_hiwater / bufmem_lowater) *
				  LFS_INVERSE_MAX_BYTES(
					  lfs_fsbtob(fs, LFS_NRESERVE(fs))) >> PAGE_SHIFT)));
		kmem_free(fs, sizeof(struct lfs));
		error = EFBIG;		/* XXX needs translation */
		goto out;
	}

	/* Before rolling forward, lock so vget will sleep for other procs */
	if (l != NULL) {
		fs->lfs_flags = LFS_NOTYET;
		fs->lfs_rfpid = l->l_proc->p_pid;
	}

	ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
	ump->um_lfs = fs;
	ump->um_fstype = ULFS1;
	/* ump->um_cleaner_thread = NULL; */
	if (sizeof(struct lfs) < LFS_SBPAD) {			/* XXX why? */
		brelse(bp, BC_INVAL);
		brelse(abp, BC_INVAL);
	} else {
		brelse(bp, 0);
		brelse(abp, 0);
	}
	bp = NULL;
	abp = NULL;

	/* Set up the I/O information */
	fs->lfs_devbsize = DEV_BSIZE;
	fs->lfs_iocount = 0;
	fs->lfs_diropwait = 0;
	fs->lfs_activesb = 0;
	lfs_sb_setuinodes(fs, 0);
	fs->lfs_ravail = 0;
	fs->lfs_favail = 0;
	fs->lfs_sbactive = 0;

	/* Set up the ifile and lock flags */
	fs->lfs_doifile = 0;
	fs->lfs_writer = 0;
	fs->lfs_dirops = 0;
	fs->lfs_nadirop = 0;
	fs->lfs_seglock = 0;
	fs->lfs_pdflush = 0;
	fs->lfs_sleepers = 0;
	fs->lfs_pages = 0;
	rw_init(&fs->lfs_fraglock);
	rw_init(&fs->lfs_iflock);
	cv_init(&fs->lfs_stopcv, "lfsstop");

	/* Set the file system readonly/modify bits. */
	fs->lfs_ronly = ronly;
	if (ronly == 0)
		fs->lfs_fmod = 1;

	/* ulfs-level information */
	fs->um_flags = 0;
	fs->um_bptrtodb = lfs_sb_getffshift(fs) - DEV_BSHIFT;
	fs->um_seqinc = lfs_sb_getfrag(fs);
	fs->um_nindir = lfs_sb_getnindir(fs);
	fs->um_lognindir = ffs(lfs_sb_getnindir(fs)) - 1;
	fs->um_maxsymlinklen = lfs_sb_getmaxsymlinklen(fs);
	fs->um_dirblksiz = LFS_DIRBLKSIZ;
	fs->um_maxfilesize = lfs_sb_getmaxfilesize(fs);

	/* quota stuff */
	/* XXX: these need to come from the on-disk superblock to be used */
	fs->lfs_use_quota2 = 0;
	fs->lfs_quota_magic = 0;
	fs->lfs_quota_flags = 0;
	fs->lfs_quotaino[0] = 0;
	fs->lfs_quotaino[1] = 0;

	/* Initialize the mount structure. */
	dev = devvp->v_rdev;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_LFS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = LFS_MAXNAMLEN;
	mp->mnt_stat.f_iosize = lfs_sb_getbsize(fs);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_fs_bshift = lfs_sb_getbshift(fs);
	if (fs->um_maxsymlinklen > 0)
		mp->mnt_iflag |= IMNT_DTYPE;

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	for (i = 0; i < ULFS_MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	spec_node_setmountedfs(devvp, mp);

	/* Set up reserved memory for pageout */
	lfs_setup_resblks(fs);
	/* Set up vdirop tailq */
	TAILQ_INIT(&fs->lfs_dchainhd);
	/* and paging tailq */
	TAILQ_INIT(&fs->lfs_pchainhd);
	/* and delayed segment accounting for truncation list */
	LIST_INIT(&fs->lfs_segdhd);

	/*
	 * We use the ifile vnode for almost every operation.  Instead of
	 * retrieving it from the hash table each time we retrieve it here,
	 * artificially increment the reference count and keep a pointer
	 * to it in the incore copy of the superblock.
	 */
	if ((error = VFS_VGET(mp, LFS_IFILE_INUM, &vp)) != 0) {
		DLOG((DLOG_MOUNT, "lfs_mountfs: ifile vget failed, error=%d\n", error));
		goto out;
	}
	fs->lfs_ivnode = vp;
	vref(vp);

	/* Set up inode bitmap and order free list */
	lfs_order_freelist(fs);

	/* Set up segment usage flags for the autocleaner. */
	fs->lfs_nactive = 0;
	fs->lfs_suflags = malloc(2 * sizeof(u_int32_t *),
				 M_SEGMENT, M_WAITOK);
	fs->lfs_suflags[0] = malloc(lfs_sb_getnseg(fs) * sizeof(u_int32_t),
				    M_SEGMENT, M_WAITOK);
	fs->lfs_suflags[1] = malloc(lfs_sb_getnseg(fs) * sizeof(u_int32_t),
				    M_SEGMENT, M_WAITOK);
	memset(fs->lfs_suflags[1], 0, lfs_sb_getnseg(fs) * sizeof(u_int32_t));
	for (i = 0; i < lfs_sb_getnseg(fs); i++) {
		int changed;

		LFS_SEGENTRY(sup, fs, i, bp);
		changed = 0;
		if (!ronly) {
			if (sup->su_nbytes == 0 &&
			    !(sup->su_flags & SEGUSE_EMPTY)) {
				sup->su_flags |= SEGUSE_EMPTY;
				++changed;
			} else if (!(sup->su_nbytes == 0) &&
				   (sup->su_flags & SEGUSE_EMPTY)) {
				sup->su_flags &= ~SEGUSE_EMPTY;
				++changed;
			}
			if (sup->su_flags & (SEGUSE_ACTIVE|SEGUSE_INVAL)) {
				sup->su_flags &= ~(SEGUSE_ACTIVE|SEGUSE_INVAL);
				++changed;
			}
		}
		fs->lfs_suflags[0][i] = sup->su_flags;
		if (changed)
			LFS_WRITESEGENTRY(sup, fs, i, bp);
		else
			brelse(bp, 0);
	}

	/*
	 * XXX: if the fs has quotas, quotas should be on even if
	 * readonly. Otherwise you can't query the quota info!
	 * However, that's not how the quota2 code got written and I
	 * don't know if it'll behave itself if enabled while
	 * readonly, so for now use the same enable logic as ffs.
	 *
	 * XXX: also, if you use the -f behavior allowed here (and
	 * equivalently above for remount) it will corrupt the fs. It
	 * ought not to allow that. It should allow mounting readonly
	 * if there are quotas and the kernel doesn't have the quota
	 * code, but only readonly.
	 *
	 * XXX: and if you use the -f behavior allowed here it will
	 * likely crash at unmount time (or remount time) because we
	 * think quotas are active.
	 *
	 * Although none of this applies until there's a way to set
	 * lfs_use_quota2 and have quotas in the fs at all.
	 */
	if (!ronly && fs->lfs_use_quota2) {
#ifdef LFS_QUOTA2
		error = lfs_quota2_mount(mp);
#else
		uprintf("%s: no kernel support for this filesystem's quotas\n",
			mp->mnt_stat.f_mntonname);
		if (mp->mnt_flag & MNT_FORCE) {
			uprintf("%s: mounting anyway; fsck afterwards\n",
				mp->mnt_stat.f_mntonname);
		} else {
			error = EINVAL;
		}
#endif
		if (error) {
			/* XXX XXX must clean up the stuff immediately above */
			printf("lfs_mountfs: sorry, leaking some memory\n");
			goto out;
		}
	}

#ifdef LFS_EXTATTR
	/*
	 * Initialize file-backed extended attributes for ULFS1 file
	 * systems.
	 *
	 * XXX: why is this limited to ULFS1?
	 */
	if (ump->um_fstype == ULFS1) {
		ulfs_extattr_uepm_init(&ump->um_extattr);
	}
#endif

#ifdef LFS_KERNEL_RFW
	lfs_roll_forward(fs, mp, l);
#endif

	/* If writing, sb is not clean; record in case of immediate crash */
	if (!fs->lfs_ronly) {
		lfs_sb_setpflags(fs, lfs_sb_getpflags(fs) & ~LFS_PF_CLEAN);
		lfs_writesuper(fs, lfs_sb_getsboff(fs, 0));
		lfs_writesuper(fs, lfs_sb_getsboff(fs, 1));
	}

	/* Allow vget now that roll-forward is complete */
	fs->lfs_flags &= ~(LFS_NOTYET);
	wakeup(&fs->lfs_flags);

	/*
	 * Initialize the ifile cleaner info with information from
	 * the superblock.
	 */
	LFS_CLEANERINFO(cip, fs, bp);
	lfs_ci_setclean(fs, cip, lfs_sb_getnclean(fs));
	lfs_ci_setdirty(fs, cip, lfs_sb_getnseg(fs) - lfs_sb_getnclean(fs));
	lfs_ci_setavail(fs, cip, lfs_sb_getavail(fs));
	lfs_ci_setbfree(fs, cip, lfs_sb_getbfree(fs));
	(void) LFS_BWRITE_LOG(bp); /* Ifile */

	/*
	 * Mark the current segment as ACTIVE, since we're going to
	 * be writing to it.
	 */
	LFS_SEGENTRY(sup, fs, lfs_dtosn(fs, lfs_sb_getoffset(fs)), bp);
	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
	fs->lfs_nactive++;
	LFS_WRITESEGENTRY(sup, fs, lfs_dtosn(fs, lfs_sb_getoffset(fs)), bp); /* Ifile */

	/* Now that roll-forward is done, unlock the Ifile */
	vput(vp);

	/* Start the pagedaemon-anticipating daemon */
	mutex_enter(&lfs_lock);
	if (lfs_writer_daemon == 0 && lfs_writer_lid == 0 &&
	    kthread_create(PRI_BIO, 0, NULL,
			   lfs_writerd, NULL, NULL, "lfs_writer") != 0)
		panic("fork lfs_writer");
	mutex_exit(&lfs_lock);

	printf("WARNING: the log-structured file system is experimental\n"
	       "WARNING: it may cause system crashes and/or corrupt data\n");

	return (0);

out:
	if (bp)
		brelse(bp, 0);
	if (abp)
		brelse(abp, 0);
	if (ump) {
		kmem_free(ump->um_lfs, sizeof(struct lfs));
		kmem_free(ump, sizeof(*ump));
		mp->mnt_data = NULL;
	}

	return (error);
}

/*
 * unmount system call
 */
int
lfs_unmount(struct mount *mp, int mntflags)
{
	struct lwp *l = curlwp;
	struct ulfsmount *ump;
	struct lfs *fs;
	int error, flags, ronly;
	vnode_t *vp;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	ump = VFSTOULFS(mp);
	fs = ump->um_lfs;

	/* Two checkpoints */
	lfs_segwrite(mp, SEGM_CKP | SEGM_SYNC);
	lfs_segwrite(mp, SEGM_CKP | SEGM_SYNC);

	/* wake up the cleaner so it can die */
	/* XXX: shouldn't this be *after* the error cases below? */
	lfs_wakeup_cleaner(fs);
	mutex_enter(&lfs_lock);
	while (fs->lfs_sleepers)
		mtsleep(&fs->lfs_sleepers, PRIBIO + 1, "lfs_sleepers", 0,
			&lfs_lock);
	mutex_exit(&lfs_lock);

#ifdef LFS_EXTATTR
	if (ump->um_fstype == ULFS1) {
		if (ump->um_extattr.uepm_flags & ULFS_EXTATTR_UEPM_STARTED) {
			ulfs_extattr_stop(mp, curlwp);
		}
		if (ump->um_extattr.uepm_flags & ULFS_EXTATTR_UEPM_INITIALIZED) {
			ulfs_extattr_uepm_destroy(&ump->um_extattr);
		}
	}
#endif
#ifdef LFS_QUOTA
	if ((error = lfsquota1_umount(mp, flags)) != 0)
		return (error);
#endif
#ifdef LFS_QUOTA2
	if ((error = lfsquota2_umount(mp, flags)) != 0)
		return (error);
#endif
	if ((error = vflush(mp, fs->lfs_ivnode, flags)) != 0)
		return (error);
	if ((error = VFS_SYNC(mp, 1, l->l_cred)) != 0)
		return (error);
	vp = fs->lfs_ivnode;
	mutex_enter(vp->v_interlock);
	if (LIST_FIRST(&vp->v_dirtyblkhd))
		panic("lfs_unmount: still dirty blocks on ifile vnode");
	mutex_exit(vp->v_interlock);

	/* Explicitly write the superblock, to update serial and pflags */
	lfs_sb_setpflags(fs, lfs_sb_getpflags(fs) | LFS_PF_CLEAN);
	lfs_writesuper(fs, lfs_sb_getsboff(fs, 0));
	lfs_writesuper(fs, lfs_sb_getsboff(fs, 1));
	mutex_enter(&lfs_lock);
	while (fs->lfs_iocount)
		mtsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs_umount", 0,
			&lfs_lock);
	mutex_exit(&lfs_lock);

	/* Finish with the Ifile, now that we're done with it */
	vgone(fs->lfs_ivnode);

	ronly = !fs->lfs_ronly;
	if (ump->um_devvp->v_type != VBAD)
		spec_node_setmountedfs(ump->um_devvp, NULL);
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(ump->um_devvp,
	    ronly ? FREAD : FREAD|FWRITE, NOCRED);
	vput(ump->um_devvp);

	/* Complain about page leakage */
	if (fs->lfs_pages > 0)
		printf("lfs_unmount: still claim %d pages (%d in subsystem)\n",
			fs->lfs_pages, lfs_subsys_pages);

	/* Free per-mount data structures */
	free(fs->lfs_ino_bitmap, M_SEGMENT);
	free(fs->lfs_suflags[0], M_SEGMENT);
	free(fs->lfs_suflags[1], M_SEGMENT);
	free(fs->lfs_suflags, M_SEGMENT);
	lfs_free_resblks(fs);
	cv_destroy(&fs->lfs_stopcv);
	rw_destroy(&fs->lfs_fraglock);
	rw_destroy(&fs->lfs_iflock);

	kmem_free(fs, sizeof(struct lfs));
	kmem_free(ump, sizeof(*ump));

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Get file system statistics.
 *
 * NB: We don't lock to access the superblock here, because it's not
 * really that important if we get it wrong.
 */
int
lfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	struct lfs *fs;
	struct ulfsmount *ump;

	ump = VFSTOULFS(mp);
	fs = ump->um_lfs;

	sbp->f_bsize = lfs_sb_getbsize(fs);
	sbp->f_frsize = lfs_sb_getfsize(fs);
	sbp->f_iosize = lfs_sb_getbsize(fs);
	sbp->f_blocks = LFS_EST_NONMETA(fs) - VTOI(fs->lfs_ivnode)->i_lfs_effnblks;

	sbp->f_bfree = LFS_EST_BFREE(fs);
	/*
	 * XXX this should be lfs_sb_getsize (measured in frags)
	 * rather than dsize (measured in diskblocks). However,
	 * getsize needs a format version check (for version 1 it
	 * needs to be blockstofrags'd) so for the moment I'm going to
	 * leave this... it won't fire wrongly as frags are at least
	 * as big as diskblocks.
	 */
	KASSERT(sbp->f_bfree <= lfs_sb_getdsize(fs));
#if 0
	if (sbp->f_bfree < 0)
		sbp->f_bfree = 0;
#endif

	sbp->f_bresvd = LFS_EST_RSVD(fs);
	if (sbp->f_bfree > sbp->f_bresvd)
		sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
	else
		sbp->f_bavail = 0;

	/* XXX: huh? - dholland 20150728 */
	sbp->f_files = lfs_sb_getbfree(fs) / lfs_btofsb(fs, lfs_sb_getibsize(fs))
		* LFS_INOPB(fs);
	sbp->f_ffree = sbp->f_files - lfs_sb_getnfiles(fs);
	sbp->f_favail = sbp->f_ffree;
	sbp->f_fresvd = 0;
	copy_statvfs_info(sbp, mp);
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
lfs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	int error;
	struct lfs *fs;

	fs = VFSTOULFS(mp)->um_lfs;
	if (fs->lfs_ronly)
		return 0;

	/* Snapshots should not hose the syncer */
	/*
	 * XXX Sync can block here anyway, since we don't have a very
	 * XXX good idea of how much data is pending.  If it's more
	 * XXX than a segment and lfs_nextseg is close to the end of
	 * XXX the log, we'll likely block.
	 */
	mutex_enter(&lfs_lock);
	if (fs->lfs_nowrap && lfs_sb_getnextseg(fs) < lfs_sb_getcurseg(fs)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	lfs_writer_enter(fs, "lfs_dirops");

	/* All syncs must be checkpoints until roll-forward is implemented. */
	DLOG((DLOG_FLUSH, "lfs_sync at 0x%jx\n",
	      (uintmax_t)lfs_sb_getoffset(fs)));
	error = lfs_segwrite(mp, SEGM_CKP | (waitfor ? SEGM_SYNC : 0));
	lfs_writer_leave(fs);
#ifdef LFS_QUOTA
	lfs_qsync(mp);
#endif
	return (error);
}

/*
 * Look up an LFS dinode number to find its incore vnode.  If not already
 * in core, read it in from the specified device.  Return the inode locked.
 * Detection and handling of mount points must be done by the calling routine.
 */
int
lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	int error;

	error = vcache_get(mp, &ino, sizeof(ino), vpp);
	if (error)
		return error;
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}

	return 0;
}
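
/*
 * vcache_get() above keys on the inode number; the actual on-disk read
 * is done by lfs_loadvnode() below, via the vfs_loadvnode operation.
 */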

/*
 * Create a new vnode/inode pair and initialize what fields we can.
 */
static void
lfs_init_vnode(struct ulfsmount *ump, ino_t ino, struct vnode *vp)
{
	struct lfs *fs = ump->um_lfs;
	struct inode *ip;
	union lfs_dinode *dp;

	ASSERT_NO_SEGLOCK(ump->um_lfs);

	/* Initialize the inode. */
	ip = pool_get(&lfs_inode_pool, PR_WAITOK);
	memset(ip, 0, sizeof(*ip));
	dp = pool_get(&lfs_dinode_pool, PR_WAITOK);
	memset(dp, 0, sizeof(*dp));
	ip->inode_ext.lfs = pool_get(&lfs_inoext_pool, PR_WAITOK);
	memset(ip->inode_ext.lfs, 0, sizeof(*ip->inode_ext.lfs));
	ip->i_din = dp;
	ip->i_ump = ump;
	ip->i_vnode = vp;
	ip->i_dev = ump->um_dev;
	lfs_dino_setinumber(fs, dp, ino);
	ip->i_number = ino;
	ip->i_lfs = ump->um_lfs;
	ip->i_lfs_effnblks = 0;
	SPLAY_INIT(&ip->i_lfs_lbtree);
	ip->i_lfs_nbtree = 0;
	LIST_INIT(&ip->i_lfs_segdhd);

	vp->v_tag = VT_LFS;
	vp->v_op = lfs_vnodeop_p;
	vp->v_data = ip;
}
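
/*
 * Note that after lfs_init_vnode() the caller still has to fill in the
 * dinode and call lfs_vinit() before the vnode is ready for use.
 */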

/*
 * Undo lfs_init_vnode().
 */
static void
lfs_deinit_vnode(struct ulfsmount *ump, struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
	pool_put(&lfs_dinode_pool, ip->i_din);
	pool_put(&lfs_inode_pool, ip);
	vp->v_data = NULL;
}

/*
 * Read an inode from disk and initialize this vnode / inode pair.
 * Caller assures no other thread will try to load this inode.
 */
int
lfs_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct lfs *fs;
	union lfs_dinode *dip;
	struct inode *ip;
	struct buf *bp;
	IFILE *ifp;
	struct ulfsmount *ump;
	ino_t ino;
	daddr_t daddr;
	int error, retries;
	struct timespec ts;

	KASSERT(key_len == sizeof(ino));
	memcpy(&ino, key, key_len);

	memset(&ts, 0, sizeof ts);	/* XXX gcc */

	ump = VFSTOULFS(mp);
	fs = ump->um_lfs;

	/*
	 * If the filesystem is not completely mounted yet, suspend
	 * any access requests (wait for roll-forward to complete).
	 */
	mutex_enter(&lfs_lock);
	while ((fs->lfs_flags & LFS_NOTYET) && curproc->p_pid != fs->lfs_rfpid)
		mtsleep(&fs->lfs_flags, PRIBIO+1, "lfs_notyet", 0,
			&lfs_lock);
	mutex_exit(&lfs_lock);

	/* Translate the inode number to a disk address. */
	if (ino == LFS_IFILE_INUM)
		daddr = lfs_sb_getidaddr(fs);
	else {
		/* XXX bounds-check this too */
		LFS_IENTRY(ifp, fs, ino, bp);
		daddr = lfs_if_getdaddr(fs, ifp);
		if (lfs_sb_getversion(fs) > 1) {
			ts.tv_sec = lfs_if_getatime_sec(fs, ifp);
			ts.tv_nsec = lfs_if_getatime_nsec(fs, ifp);
		}

		brelse(bp, 0);
		if (daddr == LFS_UNUSED_DADDR)
			return (ENOENT);
	}

	/* Allocate/init new vnode/inode. */
	lfs_init_vnode(ump, ino, vp);
	ip = VTOI(vp);

	/* If the cleaner supplied the inode, use it. */
	if (curlwp == ump->um_cleaner_thread && ump->um_cleaner_hint != NULL &&
	    ump->um_cleaner_hint->bi_lbn == LFS_UNUSED_LBN) {
		dip = ump->um_cleaner_hint->bi_bp;
		if (fs->lfs_is64) {
			error = copyin(dip, &ip->i_din->u_64,
			    sizeof(struct lfs64_dinode));
		} else {
			error = copyin(dip, &ip->i_din->u_32,
			    sizeof(struct lfs32_dinode));
		}
		if (error) {
			lfs_deinit_vnode(ump, vp);
			return error;
		}
		KASSERT(ip->i_number == ino);
		goto out;
	}

	/* Read in the disk contents for the inode, copy into the inode. */
	retries = 0;
again:
	error = bread(ump->um_devvp, LFS_FSBTODB(fs, daddr),
	    (lfs_sb_getversion(fs) == 1 ? lfs_sb_getbsize(fs) : lfs_sb_getibsize(fs)),
	    0, &bp);
	if (error) {
		lfs_deinit_vnode(ump, vp);
		return error;
	}

	dip = lfs_ifind(fs, ino, bp);
	if (dip == NULL) {
		/* Assume write has not completed yet; try again */
		brelse(bp, BC_INVAL);
		++retries;
		if (retries <= LFS_IFIND_RETRIES) {
			mutex_enter(&lfs_lock);
			if (fs->lfs_iocount) {
				DLOG((DLOG_VNODE,
				    "%s: dinode %ju not found, retrying...\n",
				    __func__, (uintmax_t)ino));
				(void)mtsleep(&fs->lfs_iocount, PRIBIO + 1,
					      "lfs ifind", 1, &lfs_lock);
			} else
				retries = LFS_IFIND_RETRIES;
			mutex_exit(&lfs_lock);
			goto again;
		}
#ifdef DEBUG
		/* If the seglock is held look at the bpp to see
		   what is there anyway */
		mutex_enter(&lfs_lock);
		if (fs->lfs_seglock > 0) {
			struct buf **bpp;
			union lfs_dinode *dp;
			int i;

			for (bpp = fs->lfs_sp->bpp;
			     bpp != fs->lfs_sp->cbpp; ++bpp) {
				if ((*bpp)->b_vp == fs->lfs_ivnode &&
				    bpp != fs->lfs_sp->bpp) {
					/* Inode block */
					printf("%s: block 0x%" PRIx64 ": ",
					       __func__, (*bpp)->b_blkno);
					for (i = 0; i < LFS_INOPB(fs); i++) {
						dp = DINO_IN_BLOCK(fs,
						    (*bpp)->b_data, i);
						if (lfs_dino_getinumber(fs, dp))
							printf("%ju ",
							    (uintmax_t)lfs_dino_getinumber(fs, dp));
					}
					printf("\n");
				}
			}
		}
		mutex_exit(&lfs_lock);
#endif /* DEBUG */
		panic("lfs_loadvnode: dinode not found");
	}
	lfs_copy_dinode(fs, ip->i_din, dip);
	brelse(bp, 0);

out:
	if (lfs_sb_getversion(fs) > 1) {
		ip->i_ffs1_atime = ts.tv_sec;
		ip->i_ffs1_atimensec = ts.tv_nsec;
	}

	lfs_vinit(mp, &vp);

	*new_key = &ip->i_number;
	return 0;
}

/*
 * Create a new inode and initialize this vnode / inode pair.
 */
int
lfs_newvnode(struct mount *mp, struct vnode *dvp, struct vnode *vp,
    struct vattr *vap, kauth_cred_t cred,
    size_t *key_len, const void **new_key)
{
	ino_t ino;
	struct inode *ip;
	struct ulfsmount *ump;
	struct lfs *fs;
	int error, mode, gen;

	KASSERT(dvp != NULL || vap->va_fileid > 0);
	KASSERT(dvp == NULL || dvp->v_mount == mp);
1690 KASSERT(vap->va_type != VNON);
1691
1692 *key_len = sizeof(ino);
1693 ump = VFSTOULFS(mp);
1694 fs = ump->um_lfs;
1695 mode = MAKEIMODE(vap->va_type, vap->va_mode);
1696
1697 /*
1698 * Allocate fresh inode. With "dvp == NULL" take the inode number
1699 * and version from "vap".
1700 */
1701 if (dvp == NULL) {
1702 ino = vap->va_fileid;
1703 gen = vap->va_gen;
1704 error = lfs_valloc_fixed(fs, ino, gen);
1705 } else {
1706 error = lfs_valloc(dvp, mode, cred, &ino, &gen);
1707 }
1708 if (error)
1709 return error;
1710
1711 /* Attach inode to vnode. */
1712 lfs_init_vnode(ump, ino, vp);
1713 ip = VTOI(vp);
1714
1715 mutex_enter(&lfs_lock);
1716 LFS_SET_UINO(ip, IN_CHANGE);
1717 mutex_exit(&lfs_lock);
1718
1719 /* Note no blocks yet */
1720 ip->i_lfs_hiblk = -1;
1721
1722 /* Set a new generation number for this inode. */
1723 ip->i_gen = gen;
1724 ip->i_ffs1_gen = gen;
1725
1726 memset(ip->i_lfs_fragsize, 0,
1727 ULFS_NDADDR * sizeof(*ip->i_lfs_fragsize));
1728
1729 /* Set uid / gid. */
1730 if (cred == NOCRED || cred == FSCRED) {
1731 ip->i_gid = 0;
1732 ip->i_uid = 0;
1733 } else {
1734 ip->i_gid = VTOI(dvp)->i_gid;
1735 ip->i_uid = kauth_cred_geteuid(cred);
1736 }
1737 DIP_ASSIGN(ip, gid, ip->i_gid);
1738 DIP_ASSIGN(ip, uid, ip->i_uid);
1739
1740 #if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
1741 error = lfs_chkiq(ip, 1, cred, 0);
1742 if (error) {
1743 lfs_vfree(dvp, ino, mode);
1744 lfs_deinit_vnode(ump, vp);
1745
1746 return error;
1747 }
1748 #endif
1749
1750 /* Set type and finalize. */
1751 ip->i_flags = 0;
1752 DIP_ASSIGN(ip, flags, 0);
1753 ip->i_mode = mode;
1754 DIP_ASSIGN(ip, mode, mode);
1755 if (vap->va_rdev != VNOVAL) {
1756 /*
1757 * Want to be able to use this to make badblock
1758 * inodes, so don't truncate the dev number.
1759 */
1760 if (ump->um_fstype == ULFS1)
1761 ip->i_ffs1_rdev = ulfs_rw32(vap->va_rdev,
1762 ULFS_MPNEEDSWAP(fs));
1763 else
1764 ip->i_ffs2_rdev = ulfs_rw64(vap->va_rdev,
1765 ULFS_MPNEEDSWAP(fs));
1766 }
1767 lfs_vinit(mp, &vp);
1768
1769 *new_key = &ip->i_number;
1770 return 0;
1771 }
1772
1773 /*
1774 * File handle to vnode
1775 */
1776 int
1777 lfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1778 {
1779 struct lfid lfh;
1780 struct lfs *fs;
1781
1782 if (fhp->fid_len != sizeof(struct lfid))
1783 return EINVAL;
1784
1785 memcpy(&lfh, fhp, sizeof(lfh));
1786 if (lfh.lfid_ino < LFS_IFILE_INUM)
1787 return ESTALE;
1788
1789 fs = VFSTOULFS(mp)->um_lfs;
1790 if (lfh.lfid_ident != lfs_sb_getident(fs))
1791 return ESTALE;
1792
1793 if (lfh.lfid_ino >
1794 ((VTOI(fs->lfs_ivnode)->i_ffs1_size >> lfs_sb_getbshift(fs)) -
1795 lfs_sb_getcleansz(fs) - lfs_sb_getsegtabsz(fs)) * lfs_sb_getifpb(fs))
1796 return ESTALE;
1797
1798 return (ulfs_fhtovp(mp, &lfh.lfid_ufid, vpp));
1799 }
1800
1801 /*
1802 * Vnode pointer to File handle
1803 */
1804 /* ARGSUSED */
1805 int
1806 lfs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
1807 {
1808 struct inode *ip;
1809 struct lfid lfh;
1810
1811 if (*fh_size < sizeof(struct lfid)) {
1812 *fh_size = sizeof(struct lfid);
1813 return E2BIG;
1814 }
1815 *fh_size = sizeof(struct lfid);
1816 ip = VTOI(vp);
1817 memset(&lfh, 0, sizeof(lfh));
1818 lfh.lfid_len = sizeof(struct lfid);
1819 lfh.lfid_ino = ip->i_number;
1820 lfh.lfid_gen = ip->i_gen;
1821 lfh.lfid_ident = lfs_sb_getident(ip->i_lfs);
1822 memcpy(fhp, &lfh, sizeof(lfh));
1823 return (0);
1824 }
1825
1826 /*
1827 * ulfs_bmaparray callback function for writing.
1828 *
1829 * Since blocks will be written to the new segment anyway,
1830 * we don't care about current daddr of them.
1831 */
1832 static bool
1833 lfs_issequential_hole(const struct lfs *fs,
1834 daddr_t daddr0, daddr_t daddr1)
1835 {
1836 (void)fs; /* not used */
1837
1838 daddr0 = (daddr_t)((int32_t)daddr0); /* XXX ondisk32 */
1839 daddr1 = (daddr_t)((int32_t)daddr1); /* XXX ondisk32 */
1840
1841 KASSERT(daddr0 == UNWRITTEN ||
1842 (0 <= daddr0 && daddr0 <= LFS_MAX_DADDR(fs)));
1843 KASSERT(daddr1 == UNWRITTEN ||
1844 (0 <= daddr1 && daddr1 <= LFS_MAX_DADDR(fs)));
1845
1846 /* NOTE: all we want to know here is 'hole or not'. */
1847 /* NOTE: UNASSIGNED is converted to 0 by ulfs_bmaparray. */
1848
1849 /*
1850 * treat UNWRITTENs and all resident blocks as 'contiguous'
1851 */
1852 if (daddr0 != 0 && daddr1 != 0)
1853 return true;
1854
1855 /*
1856 * both are in hole?
1857 */
1858 if (daddr0 == 0 && daddr1 == 0)
1859 return true; /* all holes are 'contiguous' for us. */
1860
1861 return false;
1862 }
1863
1864 /*
1865 * lfs_gop_write functions exactly like genfs_gop_write, except that
1866 * (1) it requires the seglock to be held by its caller, and sp->fip
1867 * to be properly initialized (it will return without re-initializing
1868 * sp->fip, and without calling lfs_writeseg).
1869 * (2) it uses the remaining space in the segment, rather than VOP_BMAP,
1870 * to determine how large a block it can write at once (though it does
1871 * still use VOP_BMAP to find holes in the file);
1872 * (3) it calls lfs_gatherblock instead of VOP_STRATEGY on its blocks
1873 * (leaving lfs_writeseg to deal with the cluster blocks, so we might
1874 * now have clusters of clusters, ick.)
1875 */
1876 static int
1877 lfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
1878 int flags)
1879 {
1880 int i, error, run, haveeof = 0;
1881 int fs_bshift;
1882 vaddr_t kva;
1883 off_t eof, offset, startoffset = 0;
1884 size_t bytes, iobytes, skipbytes;
1885 bool async = (flags & PGO_SYNCIO) == 0;
1886 daddr_t lbn, blkno;
1887 struct vm_page *pg;
1888 struct buf *mbp, *bp;
1889 struct vnode *devvp = VTOI(vp)->i_devvp;
1890 struct inode *ip = VTOI(vp);
1891 struct lfs *fs = ip->i_lfs;
1892 struct segment *sp = fs->lfs_sp;
1893 SEGSUM *ssp;
1894 UVMHIST_FUNC("lfs_gop_write"); UVMHIST_CALLED(ubchist);
1895 const char * failreason = NULL;
1896
1897 ASSERT_SEGLOCK(fs);
1898
1899 /* The Ifile lives in the buffer cache */
1900 KASSERT(vp != fs->lfs_ivnode);
1901
1902 /*
1903 * We don't want to fill the disk before the cleaner has a chance
1904 * to make room for us. If we're in danger of doing that, fail
1905 * with EAGAIN. The caller will have to notice this, unlock
1906 * so the cleaner can run, relock and try again.
1907 *
1908 * We must write everything, however, if our vnode is being
1909 * reclaimed.
1910 */
1911 mutex_enter(vp->v_interlock);
1912 if (LFS_STARVED_FOR_SEGS(fs) && vdead_check(vp, VDEAD_NOWAIT) == 0) {
1913 mutex_exit(vp->v_interlock);
1914 failreason = "Starved for segs and not flushing vp";
1915 goto tryagain;
1916 }
1917 mutex_exit(vp->v_interlock);
1918
1919 /*
1920 * Sometimes things slip past the filters in lfs_putpages,
1921 	 * and the pagedaemon tries to write pages; the problem is
1922 * that the pagedaemon never acquires the segment lock.
1923 *
1924 * Alternatively, pages that were clean when we called
1925 * genfs_putpages may have become dirty in the meantime. In this
1926 * case the segment header is not properly set up for blocks
1927 * to be added to it.
1928 *
1929 * Unbusy and unclean the pages, and put them on the ACTIVE
1930 * queue under the hypothesis that they couldn't have got here
1931 * unless they were modified *quite* recently.
1932 *
1933 * XXXUBC that last statement is an oversimplification of course.
1934 */
1935 if (!LFS_SEGLOCK_HELD(fs)) {
1936 failreason = "Seglock not held";
1937 goto tryagain;
1938 }
1939 if (ip->i_lfs_iflags & LFSI_NO_GOP_WRITE) {
1940 failreason = "Inode with no_gop_write";
1941 goto tryagain;
1942 }
1943 if ((pgs[0]->offset & lfs_sb_getbmask(fs)) != 0) {
1944 failreason = "Bad page offset";
1945 goto tryagain;
1946 }
1947
1948 UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1949 vp, pgs, npages, flags);
1950
1951 GOP_SIZE(vp, vp->v_size, &eof, 0);
1952 haveeof = 1;
1953
1954 if (vp->v_type == VREG)
1955 fs_bshift = vp->v_mount->mnt_fs_bshift;
1956 else
1957 fs_bshift = DEV_BSHIFT;
1958 error = 0;
1959 pg = pgs[0];
1960 startoffset = pg->offset;
1961 KASSERT(eof >= 0);
1962
1963 if (startoffset >= eof) {
1964 failreason = "Offset beyond EOF";
1965 goto tryagain;
1966 } else
1967 bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
1968 skipbytes = 0;
1969
1970 KASSERT(bytes != 0);
1971
1972 /* Swap PG_DELWRI for PG_PAGEOUT */
1973 for (i = 0; i < npages; i++) {
1974 if (pgs[i]->flags & PG_DELWRI) {
1975 KASSERT(!(pgs[i]->flags & PG_PAGEOUT));
1976 pgs[i]->flags &= ~PG_DELWRI;
1977 pgs[i]->flags |= PG_PAGEOUT;
1978 uvm_pageout_start(1);
1979 mutex_enter(vp->v_interlock);
1980 mutex_enter(&uvm_pageqlock);
1981 uvm_pageunwire(pgs[i]);
1982 mutex_exit(&uvm_pageqlock);
1983 mutex_exit(vp->v_interlock);
1984 }
1985 }
1986
1987 /*
1988 * Check to make sure we're starting on a block boundary.
1989 * We'll check later to make sure we always write entire
1990 * blocks (or fragments).
1991 */
1992 if (startoffset & lfs_sb_getbmask(fs))
1993 printf("%" PRId64 " & %" PRIu64 " = %" PRId64 "\n",
1994 startoffset, lfs_sb_getbmask(fs),
1995 startoffset & lfs_sb_getbmask(fs));
1996 KASSERT((startoffset & lfs_sb_getbmask(fs)) == 0);
1997 if (bytes & lfs_sb_getffmask(fs)) {
1998 printf("lfs_gop_write: asked to write %ld bytes\n", (long)bytes);
1999 panic("lfs_gop_write: non-integer blocks");
2000 }
2001
2002 /*
2003 * We could deadlock here on pager_map with UVMPAGER_MAPIN_WAITOK.
2004 	 * If that is a risk, write what we have and try again.  If we
2005 	 * have nothing to write, we'll have to sleep.
2006 */
2007 ssp = (SEGSUM *)sp->segsum;
2008 if ((kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WRITE |
2009 (lfs_ss_getnfinfo(fs, ssp) < 1 ?
2010 UVMPAGER_MAPIN_WAITOK : 0))) == 0x0) {
2011 DLOG((DLOG_PAGE, "lfs_gop_write: forcing write\n"));
2012 #if 0
2013 " with nfinfo=%d at offset 0x%jx\n",
2014 (int)lfs_ss_getnfinfo(fs, ssp),
2015 (uintmax_t)lfs_sb_getoffset(fs)));
2016 #endif
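		/*
		 * Write what we have gathered so far: fold the pending
		 * block addresses into the inode, close out this file's
		 * FINFO, write the partial segment, then begin a fresh
		 * FINFO for this inode.
		 */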
2017 lfs_updatemeta(sp);
2018 lfs_release_finfo(fs);
2019 (void) lfs_writeseg(fs, sp);
2020
2021 lfs_acquire_finfo(fs, ip->i_number, ip->i_gen);
2022
2023 /*
2024 * Having given up all of the pager_map we were holding,
2025 * we can now wait for aiodoned to reclaim it for us
2026 * without fear of deadlock.
2027 */
2028 kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WRITE |
2029 UVMPAGER_MAPIN_WAITOK);
2030 }
2031
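	/*
	 * Set up the master buffer covering the whole mapped range;
	 * the loop below splits it into per-extent nested buffers.
	 */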
2032 mbp = getiobuf(NULL, true);
2033 UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
2034 vp, mbp, vp->v_numoutput, bytes);
2035 mbp->b_bufsize = npages << PAGE_SHIFT;
2036 mbp->b_data = (void *)kva;
2037 mbp->b_resid = mbp->b_bcount = bytes;
2038 mbp->b_cflags = BC_BUSY|BC_AGE;
2039 mbp->b_iodone = uvm_aio_biodone;
2040
2041 bp = NULL;
2042 for (offset = startoffset;
2043 bytes > 0;
2044 offset += iobytes, bytes -= iobytes) {
2045 lbn = offset >> fs_bshift;
2046 error = ulfs_bmaparray(vp, lbn, &blkno, NULL, NULL, &run,
2047 lfs_issequential_hole);
2048 if (error) {
2049 UVMHIST_LOG(ubchist, "ulfs_bmaparray() -> %d",
2050 error,0,0,0);
2051 skipbytes += bytes;
2052 bytes = 0;
2053 break;
2054 }
2055
2056 iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
2057 bytes);
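		/* A hole in the file: count these bytes as skipped. */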
2058 if (blkno == (daddr_t)-1) {
2059 skipbytes += iobytes;
2060 continue;
2061 }
2062
2063 /*
2064 * Discover how much we can really pack into this buffer.
2065 */
2066 /* If no room in the current segment, finish it up */
2067 if (sp->sum_bytes_left < sizeof(int32_t) ||
2068 sp->seg_bytes_left < (1 << lfs_sb_getbshift(fs))) {
2069 int vers;
2070
2071 lfs_updatemeta(sp);
2072 vers = lfs_fi_getversion(fs, sp->fip);
2073 lfs_release_finfo(fs);
2074 (void) lfs_writeseg(fs, sp);
2075
2076 lfs_acquire_finfo(fs, ip->i_number, vers);
2077 }
2078 /* Check both for space in segment and space in segsum */
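		/*
		 * Each block costs one 32-bit disk address in the segment
		 * summary in addition to its data space in the segment.
		 */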
2079 iobytes = MIN(iobytes, (sp->seg_bytes_left >> fs_bshift)
2080 << fs_bshift);
2081 iobytes = MIN(iobytes, (sp->sum_bytes_left / sizeof(int32_t))
2082 << fs_bshift);
2083 KASSERT(iobytes > 0);
2084
2085 /* if it's really one i/o, don't make a second buf */
2086 if (offset == startoffset && iobytes == bytes) {
2087 bp = mbp;
2088 /*
2089 * All the LFS output is done by the segwriter. It
2090 * will increment numoutput by one for all the bufs it
2091 			 * receives.  However, this buffer needs one extra to
2092 * account for aiodone.
2093 */
2094 mutex_enter(vp->v_interlock);
2095 vp->v_numoutput++;
2096 mutex_exit(vp->v_interlock);
2097 } else {
2098 bp = getiobuf(NULL, true);
2099 UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
2100 vp, bp, vp->v_numoutput, 0);
2101 nestiobuf_setup(mbp, bp, offset - pg->offset, iobytes);
2102 /*
2103 * LFS doesn't like async I/O here, dies with
2104 * an assert in lfs_bwrite(). Is that assert
2105 			 * valid?  I retained the non-async behaviour when I
2106 			 * converted this to use nestiobuf --pooka
2107 */
2108 bp->b_flags &= ~B_ASYNC;
2109 }
2110
2111 /* XXX This is silly ... is this necessary? */
2112 mutex_enter(&bufcache_lock);
2113 mutex_enter(vp->v_interlock);
2114 bgetvp(vp, bp);
2115 mutex_exit(vp->v_interlock);
2116 mutex_exit(&bufcache_lock);
2117
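		/*
		 * Finish setting up this buffer and queue it for the
		 * segment writer; lfs_gatherblock() adds it to the
		 * partial segment under construction.
		 */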
2118 bp->b_lblkno = lfs_lblkno(fs, offset);
2119 bp->b_private = mbp;
2120 if (devvp->v_type == VBLK) {
2121 bp->b_dev = devvp->v_rdev;
2122 }
2123 VOP_BWRITE(bp->b_vp, bp);
2124 while (lfs_gatherblock(sp, bp, NULL))
2125 continue;
2126 }
2127
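	/*
	 * Credit the skipped bytes to the master buffer so that it can
	 * complete once all of the nested I/O finishes.
	 */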
2128 nestiobuf_done(mbp, skipbytes, error);
2129 if (skipbytes) {
2130 UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
2131 }
2132 UVMHIST_LOG(ubchist, "returning 0", 0,0,0,0);
2133
2134 if (!async) {
2135 /* Start a segment write. */
2136 UVMHIST_LOG(ubchist, "flushing", 0,0,0,0);
2137 mutex_enter(&lfs_lock);
2138 lfs_flush(fs, 0, 1);
2139 mutex_exit(&lfs_lock);
2140 }
2141
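	/*
	 * If we were asked for a single-segment write (SEGM_SINGLE) and
	 * have rolled over into a new segment, tell the caller to come
	 * back for the rest.
	 */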
2142 if ((sp->seg_flags & SEGM_SINGLE) && lfs_sb_getcurseg(fs) != fs->lfs_startseg)
2143 return EAGAIN;
2144
2145 return (0);
2146
2147 tryagain:
2148 /*
2149 * We can't write the pages, for whatever reason.
2150 * Clean up after ourselves, and make the caller try again.
2151 */
2152 mutex_enter(vp->v_interlock);
2153
2154 /* Tell why we're here, if we know */
2155 if (failreason != NULL) {
2156 DLOG((DLOG_PAGE, "lfs_gop_write: %s\n", failreason));
2157 }
2158 if (haveeof && startoffset >= eof) {
2159 DLOG((DLOG_PAGE, "lfs_gop_write: ino %d start 0x%" PRIx64
2160 " eof 0x%" PRIx64 " npages=%d\n", VTOI(vp)->i_number,
2161 pgs[0]->offset, eof, npages));
2162 }
2163
2164 mutex_enter(&uvm_pageqlock);
2165 for (i = 0; i < npages; i++) {
2166 pg = pgs[i];
2167
2168 if (pg->flags & PG_PAGEOUT)
2169 uvm_pageout_done(1);
2170 if (pg->flags & PG_DELWRI) {
2171 uvm_pageunwire(pg);
2172 }
2173 uvm_pageactivate(pg);
2174 pg->flags &= ~(PG_CLEAN|PG_DELWRI|PG_PAGEOUT|PG_RELEASED);
2175 DLOG((DLOG_PAGE, "pg[%d] = %p (vp %p off %" PRIx64 ")\n", i, pg,
2176 vp, pg->offset));
2177 DLOG((DLOG_PAGE, "pg[%d]->flags = %x\n", i, pg->flags));
2178 DLOG((DLOG_PAGE, "pg[%d]->pqflags = %x\n", i, pg->pqflags));
2179 DLOG((DLOG_PAGE, "pg[%d]->uanon = %p\n", i, pg->uanon));
2180 DLOG((DLOG_PAGE, "pg[%d]->uobject = %p\n", i, pg->uobject));
2181 DLOG((DLOG_PAGE, "pg[%d]->wire_count = %d\n", i,
2182 pg->wire_count));
2183 DLOG((DLOG_PAGE, "pg[%d]->loan_count = %d\n", i,
2184 pg->loan_count));
2185 }
2186 	/* uvm_page_unbusy takes care of PG_BUSY, PG_WANTED */
2187 uvm_page_unbusy(pgs, npages);
2188 mutex_exit(&uvm_pageqlock);
2189 mutex_exit(vp->v_interlock);
2190 return EAGAIN;
2191 }
2192
2193 /*
2194  * Finish vnode/inode initialization.
2195  * Used by lfs_vget.
2196 */
2197 void
2198 lfs_vinit(struct mount *mp, struct vnode **vpp)
2199 {
2200 struct vnode *vp = *vpp;
2201 struct inode *ip = VTOI(vp);
2202 struct ulfsmount *ump = VFSTOULFS(mp);
2203 struct lfs *fs = ump->um_lfs;
2204 int i;
2205
2206 ip->i_mode = ip->i_ffs1_mode;
2207 ip->i_nlink = ip->i_ffs1_nlink;
2208 ip->i_lfs_osize = ip->i_size = ip->i_ffs1_size;
2209 ip->i_flags = ip->i_ffs1_flags;
2210 ip->i_gen = ip->i_ffs1_gen;
2211 ip->i_uid = ip->i_ffs1_uid;
2212 ip->i_gid = ip->i_ffs1_gid;
2213
2214 ip->i_lfs_effnblks = ip->i_ffs1_blocks;
2215 ip->i_lfs_odnlink = ip->i_ffs1_nlink;
2216
2217 /*
2218 	 * Initialize the vnode from the inode and check for aliases.  In
2219 	 * all cases re-init ip; the underlying vnode/inode may have changed.
2220 */
2221 ulfs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
2222 ip = VTOI(vp);
2223
2224 memset(ip->i_lfs_fragsize, 0, ULFS_NDADDR * sizeof(*ip->i_lfs_fragsize));
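	/*
	 * Short symlinks store their target in the inode itself and own
	 * no data blocks; otherwise record each direct block's size and,
	 * under DEBUG, verify that no block pointers exist past EOF.
	 */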
2225 if (vp->v_type != VLNK || ip->i_size >= ip->i_lfs->um_maxsymlinklen) {
2226 #ifdef DEBUG
2227 for (i = (ip->i_size + lfs_sb_getbsize(fs) - 1) >> lfs_sb_getbshift(fs);
2228 i < ULFS_NDADDR; i++) {
2229 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
2230 i == 0)
2231 continue;
2232 if (ip->i_ffs1_db[i] != 0) {
2233 lfs_dump_dinode(fs, ip->i_din);
2234 panic("inconsistent inode (direct)");
2235 }
2236 }
2237 for ( ; i < ULFS_NDADDR + ULFS_NIADDR; i++) {
2238 if (ip->i_ffs1_ib[i - ULFS_NDADDR] != 0) {
2239 lfs_dump_dinode(fs, ip->i_din);
2240 panic("inconsistent inode (indirect)");
2241 }
2242 }
2243 #endif /* DEBUG */
2244 for (i = 0; i < ULFS_NDADDR; i++)
2245 if (ip->i_ffs1_db[i] != 0)
2246 ip->i_lfs_fragsize[i] = lfs_blksize(fs, ip, i);
2247 }
2248
2249 #ifdef DIAGNOSTIC
2250 if (vp->v_type == VNON) {
2251 # ifdef DEBUG
2252 lfs_dump_dinode(fs, ip->i_din);
2253 # endif
2254 panic("lfs_vinit: ino %llu is type VNON! (ifmt=%o)\n",
2255 (unsigned long long)ip->i_number,
2256 (ip->i_mode & LFS_IFMT) >> 12);
2257 }
2258 #endif /* DIAGNOSTIC */
2259
2260 /*
2261 * Finish inode initialization now that aliasing has been resolved.
2262 */
2263
2264 ip->i_devvp = ump->um_devvp;
2265 vref(ip->i_devvp);
2266 #if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
2267 ulfsquota_init(ip);
2268 #endif
2269 genfs_node_init(vp, &lfs_genfsops);
2270 uvm_vnp_setsize(vp, ip->i_size);
2271
2272 /* Initialize hiblk from file size */
2273 ip->i_lfs_hiblk = lfs_lblkno(ip->i_lfs, ip->i_size + lfs_sb_getbsize(ip->i_lfs) - 1) - 1;
2274
2275 *vpp = vp;
2276 }
2277
2278 /*
2279 * Resize the filesystem to contain the specified number of segments.
2280 */
2281 int
2282 lfs_resize_fs(struct lfs *fs, int newnsegs)
2283 {
2284 SEGUSE *sup;
2285 CLEANERINFO *cip;
2286 struct buf *bp, *obp;
2287 daddr_t olast, nlast, ilast, noff, start, end;
2288 struct vnode *ivp;
2289 struct inode *ip;
2290 int error, badnews, inc, oldnsegs;
2291 int sbbytes, csbbytes, gain, cgain;
2292 int i;
2293
2294 /* Only support v2 and up */
2295 if (lfs_sb_getversion(fs) < 2)
2296 return EOPNOTSUPP;
2297
2298 /* If we're doing nothing, do it fast */
2299 oldnsegs = lfs_sb_getnseg(fs);
2300 if (newnsegs == oldnsegs)
2301 return 0;
2302
2303 /* We always have to have two superblocks */
2304 if (newnsegs <= lfs_dtosn(fs, lfs_sb_getsboff(fs, 1)))
2305 		/* XXX this error code is rather nonsensical */
2306 return EFBIG;
2307
2308 ivp = fs->lfs_ivnode;
2309 ip = VTOI(ivp);
2310 error = 0;
2311
2312 /* Take the segment lock so no one else calls lfs_newseg() */
2313 lfs_seglock(fs, SEGM_PROT);
2314
2315 /*
2316 * Make sure the segments we're going to be losing, if any,
2317 * are in fact empty. We hold the seglock, so their status
2318 * cannot change underneath us. Count the superblocks we lose,
2319 * while we're at it.
2320 */
2321 sbbytes = csbbytes = 0;
2322 cgain = 0;
2323 for (i = newnsegs; i < oldnsegs; i++) {
2324 LFS_SEGENTRY(sup, fs, i, bp);
2325 badnews = sup->su_nbytes || !(sup->su_flags & SEGUSE_INVAL);
2326 if (sup->su_flags & SEGUSE_SUPERBLOCK)
2327 sbbytes += LFS_SBPAD;
2328 if (!(sup->su_flags & SEGUSE_DIRTY)) {
2329 ++cgain;
2330 if (sup->su_flags & SEGUSE_SUPERBLOCK)
2331 csbbytes += LFS_SBPAD;
2332 }
2333 brelse(bp, 0);
2334 if (badnews) {
2335 error = EBUSY;
2336 goto out;
2337 }
2338 }
2339
2340 /* Note old and new segment table endpoints, and old ifile size */
2341 olast = lfs_sb_getcleansz(fs) + lfs_sb_getsegtabsz(fs);
2342 nlast = howmany(newnsegs, lfs_sb_getsepb(fs)) + lfs_sb_getcleansz(fs);
2343 ilast = ivp->v_size >> lfs_sb_getbshift(fs);
2344 noff = nlast - olast;
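	/*
	 * noff is the number of Ifile blocks by which the inode table
	 * must move to make room for the resized segment table.
	 */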
2345
2346 /*
2347 * Make sure no one can use the Ifile while we change it around.
2348 	 * Even after taking the iflock we need to make sure no one is
2349 	 * still holding Ifile buffers, so we read each one to drain them.
2350 * (XXX this could be done better.)
2351 */
2352 rw_enter(&fs->lfs_iflock, RW_WRITER);
2353 for (i = 0; i < ilast; i++) {
2354 /* XXX what to do if bread fails? */
2355 bread(ivp, i, lfs_sb_getbsize(fs), 0, &bp);
2356 brelse(bp, 0);
2357 }
2358
2359 /* Allocate new Ifile blocks */
2360 for (i = ilast; i < ilast + noff; i++) {
2361 if (lfs_balloc(ivp, i * lfs_sb_getbsize(fs), lfs_sb_getbsize(fs), NOCRED, 0,
2362 &bp) != 0)
2363 panic("balloc extending ifile");
2364 memset(bp->b_data, 0, lfs_sb_getbsize(fs));
2365 VOP_BWRITE(bp->b_vp, bp);
2366 }
2367
2368 /* Register new ifile size */
2369 ip->i_size += noff * lfs_sb_getbsize(fs);
2370 ip->i_ffs1_size = ip->i_size;
2371 uvm_vnp_setsize(ivp, ip->i_size);
2372
2373 /* Copy the inode table to its new position */
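	/*
	 * As with memmove(), pick the copy direction from the sign of
	 * noff so that source blocks are read before being overwritten.
	 */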
2374 if (noff != 0) {
2375 if (noff < 0) {
2376 start = nlast;
2377 end = ilast + noff;
2378 inc = 1;
2379 } else {
2380 start = ilast + noff - 1;
2381 end = nlast - 1;
2382 inc = -1;
2383 }
2384 for (i = start; i != end; i += inc) {
2385 if (bread(ivp, i, lfs_sb_getbsize(fs),
2386 B_MODIFY, &bp) != 0)
2387 panic("resize: bread dst blk failed");
2388 if (bread(ivp, i - noff, lfs_sb_getbsize(fs),
2389 0, &obp))
2390 panic("resize: bread src blk failed");
2391 memcpy(bp->b_data, obp->b_data, lfs_sb_getbsize(fs));
2392 VOP_BWRITE(bp->b_vp, bp);
2393 brelse(obp, 0);
2394 }
2395 }
2396
2397 /* If we are expanding, write the new empty SEGUSE entries */
2398 if (newnsegs > oldnsegs) {
2399 for (i = oldnsegs; i < newnsegs; i++) {
2400 if ((error = bread(ivp, i / lfs_sb_getsepb(fs) +
2401 lfs_sb_getcleansz(fs), lfs_sb_getbsize(fs),
2402 B_MODIFY, &bp)) != 0)
2403 panic("lfs: ifile read: %d", error);
2404 while ((i + 1) % lfs_sb_getsepb(fs) && i < newnsegs) {
2405 sup = &((SEGUSE *)bp->b_data)[i % lfs_sb_getsepb(fs)];
2406 memset(sup, 0, sizeof(*sup));
2407 i++;
2408 }
2409 VOP_BWRITE(bp->b_vp, bp);
2410 }
2411 }
2412
2413 /* Zero out unused superblock offsets */
2414 for (i = 2; i < LFS_MAXNUMSB; i++)
2415 if (lfs_dtosn(fs, lfs_sb_getsboff(fs, i)) >= newnsegs)
2416 lfs_sb_setsboff(fs, i, 0x0);
2417
2418 /*
2419 * Correct superblock entries that depend on fs size.
2420 * The computations of these are as follows:
2421 *
2422 * size = lfs_segtod(fs, nseg)
2423 * dsize = lfs_segtod(fs, nseg - minfreeseg) - lfs_btofsb(#super * LFS_SBPAD)
2424 * bfree = dsize - lfs_btofsb(fs, bsize * nseg / 2) - blocks_actually_used
2425 * avail = lfs_segtod(fs, nclean) - lfs_btofsb(#clean_super * LFS_SBPAD)
2426 * + (lfs_segtod(fs, 1) - (offset - curseg))
2427 * - lfs_segtod(fs, minfreeseg - (minfreeseg / 2))
2428 *
2429 * XXX - we should probably adjust minfreeseg as well.
2430 */
2431 gain = (newnsegs - oldnsegs);
2432 lfs_sb_setnseg(fs, newnsegs);
2433 lfs_sb_setsegtabsz(fs, nlast - lfs_sb_getcleansz(fs));
2434 lfs_sb_addsize(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)));
2435 lfs_sb_adddsize(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)) - lfs_btofsb(fs, sbbytes));
2436 lfs_sb_addbfree(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)) - lfs_btofsb(fs, sbbytes)
2437 - gain * lfs_btofsb(fs, lfs_sb_getbsize(fs) / 2));
2438 if (gain > 0) {
2439 lfs_sb_addnclean(fs, gain);
2440 lfs_sb_addavail(fs, gain * lfs_btofsb(fs, lfs_sb_getssize(fs)));
2441 } else {
2442 lfs_sb_subnclean(fs, cgain);
2443 lfs_sb_subavail(fs, cgain * lfs_btofsb(fs, lfs_sb_getssize(fs)) -
2444 lfs_btofsb(fs, csbbytes));
2445 }
2446
2447 /* Resize segment flag cache */
2448 fs->lfs_suflags[0] = realloc(fs->lfs_suflags[0],
2449 lfs_sb_getnseg(fs) * sizeof(u_int32_t), M_SEGMENT, M_WAITOK);
2450 fs->lfs_suflags[1] = realloc(fs->lfs_suflags[1],
2451 lfs_sb_getnseg(fs) * sizeof(u_int32_t), M_SEGMENT, M_WAITOK);
2452 for (i = oldnsegs; i < newnsegs; i++)
2453 fs->lfs_suflags[0][i] = fs->lfs_suflags[1][i] = 0x0;
2454
2455 /* Truncate Ifile if necessary */
2456 if (noff < 0)
2457 lfs_truncate(ivp, ivp->v_size + (noff << lfs_sb_getbshift(fs)), 0,
2458 NOCRED);
2459
2460 /* Update cleaner info so the cleaner can die */
2461 /* XXX what to do if bread fails? */
2462 bread(ivp, 0, lfs_sb_getbsize(fs), B_MODIFY, &bp);
2463 cip = bp->b_data;
2464 lfs_ci_setclean(fs, cip, lfs_sb_getnclean(fs));
2465 lfs_ci_setdirty(fs, cip, lfs_sb_getnseg(fs) - lfs_sb_getnclean(fs));
2466 VOP_BWRITE(bp->b_vp, bp);
2467
2468 /* Let Ifile accesses proceed */
2469 rw_exit(&fs->lfs_iflock);
2470
2471 out:
2472 lfs_segunlock(fs);
2473 return error;
2474 }
2475
2476 /*
2477 * Extended attribute dispatch
2478 */
2479 int
2480 lfs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
2481 int attrnamespace, const char *attrname)
2482 {
2483 #ifdef LFS_EXTATTR
2484 struct ulfsmount *ump;
2485
2486 ump = VFSTOULFS(mp);
2487 if (ump->um_fstype == ULFS1) {
2488 return ulfs_extattrctl(mp, cmd, vp, attrnamespace, attrname);
2489 }
2490 #endif
2491 return vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname);
2492 }
2493