/*	$NetBSD: lfs_subr.c,v 1.107 2025/11/04 00:50:37 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_subr.c	8.4 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_subr.c,v 1.107 2025/11/04 00:50:37 perseant Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/kauth.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_kernel.h>
#include <ufs/lfs/lfs_extern.h>

#ifdef DEBUG
const char *lfs_res_names[LFS_NB_COUNT] = {
	"summary",
	"superblock",
	"file block",
	"cluster",
	"clean",
	"blkiov",
};
#endif

int lfs_res_qty[LFS_NB_COUNT] = {
	LFS_N_SUMMARIES,
	LFS_N_SBLOCKS,
	LFS_N_IBLOCKS,
	LFS_N_CLUSTERS,
	LFS_N_CLEAN,
	LFS_N_BLKIOV,
};

void
lfs_setup_resblks(struct lfs *fs)
{
	int i, j;
	int maxbpp;

	ASSERT_NO_SEGLOCK(fs);
	fs->lfs_resblk = malloc(LFS_N_TOTAL * sizeof(res_t), M_SEGMENT,
	    M_WAITOK);
	for (i = 0; i < LFS_N_TOTAL; i++) {
		fs->lfs_resblk[i].inuse = 0;
		fs->lfs_resblk[i].p = NULL;
	}
	for (i = 0; i < LFS_RESHASH_WIDTH; i++)
		LIST_INIT(fs->lfs_reshash + i);

	/*
	 * These types of allocations can be larger than a page,
	 * so we can't use the pool subsystem for them.
	 */
	for (i = 0, j = 0; j < LFS_N_SUMMARIES; j++, i++)
		fs->lfs_resblk[i].size = lfs_sb_getsumsize(fs);
	for (j = 0; j < LFS_N_SBLOCKS; j++, i++)
		fs->lfs_resblk[i].size = LFS_SBPAD;
	for (j = 0; j < LFS_N_IBLOCKS; j++, i++)
		fs->lfs_resblk[i].size = lfs_sb_getbsize(fs);
	for (j = 0; j < LFS_N_CLUSTERS; j++, i++)
		fs->lfs_resblk[i].size = MAXPHYS;
	for (j = 0; j < LFS_N_CLEAN; j++, i++)
		fs->lfs_resblk[i].size = MAXPHYS;
	for (j = 0; j < LFS_N_BLKIOV; j++, i++)
		fs->lfs_resblk[i].size = LFS_MARKV_MAXBLKCNT * sizeof(BLOCK_INFO);

	for (i = 0; i < LFS_N_TOTAL; i++) {
		fs->lfs_resblk[i].p = malloc(fs->lfs_resblk[i].size,
		    M_SEGMENT, M_WAITOK);
	}

	/*
	 * Initialize pools for small types (XXX is BPP small?)
	 */
	pool_init(&fs->lfs_clpool, sizeof(struct lfs_cluster), 0, 0, 0,
	    "lfsclpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&fs->lfs_segpool, sizeof(struct segment), 0, 0, 0,
	    "lfssegpool", &pool_allocator_nointr, IPL_NONE);
	/* XXX: should this int32 be 32/64? */
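	/*
	 * Size the bpp pool for the largest buffer-pointer array a single
	 * partial segment can need: no more 32-bit block addresses than
	 * fit in a segment summary after the SEGSUM header, and no more
	 * blocks than fit in a segment, whichever bound is smaller (the
	 * "+ 2" leaves a little slop in each bound).
	 */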
	maxbpp = ((lfs_sb_getsumsize(fs) - SEGSUM_SIZE(fs)) / sizeof(int32_t) + 2);
	maxbpp = MIN(maxbpp, lfs_segsize(fs) / lfs_sb_getfsize(fs) + 2);
	pool_init(&fs->lfs_bpppool, maxbpp * sizeof(struct buf *), 0, 0, 0,
	    "lfsbpppl", &pool_allocator_nointr, IPL_NONE);
}

void
lfs_free_resblks(struct lfs *fs)
{
	int i;

	pool_destroy(&fs->lfs_bpppool);
	pool_destroy(&fs->lfs_segpool);
	pool_destroy(&fs->lfs_clpool);

	mutex_enter(&lfs_lock);
	for (i = 0; i < LFS_N_TOTAL; i++) {
		while (fs->lfs_resblk[i].inuse)
			mtsleep(&fs->lfs_resblk, PRIBIO + 1, "lfs_free", 0,
			    &lfs_lock);
		if (fs->lfs_resblk[i].p != NULL)
			free(fs->lfs_resblk[i].p, M_SEGMENT);
	}
	free(fs->lfs_resblk, M_SEGMENT);
	mutex_exit(&lfs_lock);
}

static unsigned int
lfs_mhash(void *vp)
{
	return (unsigned int)(((unsigned long)vp) >> 2) % LFS_RESHASH_WIDTH;
}

/*
 * Return memory of the given size for the given purpose, or use one of a
 * number of spare last-resort buffers, if malloc returns NULL.
 */
void *
lfs_malloc(struct lfs *fs, size_t size, int type)
{
	struct lfs_res_blk *re;
	void *r;
	int i, start;
	unsigned int h;

	ASSERT_MAYBE_SEGLOCK(fs);
	r = NULL;

	/* If no mem allocated for this type, it just waits */
	if (lfs_res_qty[type] == 0) {
		r = malloc(size, M_SEGMENT, M_WAITOK);
		return r;
	}

	/* Otherwise try a quick malloc, and if it works, great */
	if ((r = malloc(size, M_SEGMENT, M_NOWAIT)) != NULL) {
		return r;
	}

	/*
	 * If malloc returned NULL, we are forced to use one of our
	 * reserve blocks.  We have on hand at least one summary block,
	 * at least one cluster block, at least one superblock,
	 * and several indirect blocks.
	 */

	mutex_enter(&lfs_lock);
	/* skip over blocks of other types */
	for (i = 0, start = 0; i < type; i++)
		start += lfs_res_qty[i];
	while (r == NULL) {
		for (i = 0; i < lfs_res_qty[type]; i++) {
			if (fs->lfs_resblk[start + i].inuse == 0) {
				re = fs->lfs_resblk + start + i;
				re->inuse = 1;
				r = re->p;
				KASSERT(re->size >= size);
				h = lfs_mhash(r);
				LIST_INSERT_HEAD(&fs->lfs_reshash[h], re, res);
				mutex_exit(&lfs_lock);
				return r;
			}
		}
		DLOG((DLOG_MALLOC, "sleeping on %s (%d)\n",
		    lfs_res_names[type], lfs_res_qty[type]));
		mtsleep(&fs->lfs_resblk, PVM, "lfs_malloc", 0,
		    &lfs_lock);
		DLOG((DLOG_MALLOC, "done sleeping on %s\n",
		    lfs_res_names[type]));
	}
	/* NOTREACHED */
	mutex_exit(&lfs_lock);
	return r;
}

void
lfs_free(struct lfs *fs, void *p, int type)
{
	unsigned int h;
	res_t *re;

	ASSERT_MAYBE_SEGLOCK(fs);
	h = lfs_mhash(p);
	mutex_enter(&lfs_lock);
	LIST_FOREACH(re, &fs->lfs_reshash[h], res) {
		if (re->p == p) {
			KASSERT(re->inuse == 1);
			LIST_REMOVE(re, res);
			re->inuse = 0;
			wakeup(&fs->lfs_resblk);
			mutex_exit(&lfs_lock);
			return;
		}
	}

#ifdef notyet /* XXX this assert fires */
	/* p was not on the reserve hash, so it must not be a reserve block */
	for (int i = 0; i < LFS_N_TOTAL; i++) {
		KDASSERTMSG(fs->lfs_resblk[i].p != p,
		    "lfs_free: inconsistent reserved block");
	}
#endif

	mutex_exit(&lfs_lock);

	/*
	 * If we didn't find it, free it.
	 */
	free(p, M_SEGMENT);
}
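
/*
 * Usage sketch (illustrative only, not lifted from a particular caller):
 * allocations that must not fail while writing a segment are obtained
 * with lfs_malloc() and returned with lfs_free() using the same type,
 * for example
 *
 *	bip = lfs_malloc(fs, nblocks * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
 *	...
 *	lfs_free(fs, bip, LFS_NB_BLKIOV);
 *
 * so that, when the system is out of memory, a preallocated reserve
 * block of the right size is handed out instead, and returning it wakes
 * any thread sleeping on fs->lfs_resblk.
 */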

/*
 * lfs_seglock --
 *	Single thread the segment writer.
 */
int
lfs_seglock(struct lfs *fs, unsigned long flags)
{
	struct segment *sp;

	mutex_enter(&lfs_lock);
	if (fs->lfs_seglock) {
		if (fs->lfs_lockpid == curproc->p_pid &&
		    fs->lfs_locklwp == curlwp->l_lid) {
			++fs->lfs_seglock;
			fs->lfs_sp->seg_flags |= flags;
			mutex_exit(&lfs_lock);
			return 0;
		} else if (flags & SEGM_PAGEDAEMON) {
			mutex_exit(&lfs_lock);
			return EWOULDBLOCK;
		} else {
			while (fs->lfs_seglock) {
				(void)mtsleep(&fs->lfs_seglock, PRIBIO + 1,
				    "lfs_seglock", 0, &lfs_lock);
			}
		}
	}

	fs->lfs_seglock = 1;
	fs->lfs_lockpid = curproc->p_pid;
	fs->lfs_locklwp = curlwp->l_lid;
	mutex_exit(&lfs_lock);
	fs->lfs_cleanind = 0;

	LFS_ENTER_LOG("seglock", __FILE__, __LINE__, 0, flags, curproc->p_pid);

	/* Drain fragment size changes out */
	rw_enter(&fs->lfs_fraglock, RW_WRITER);

	sp = fs->lfs_sp = pool_get(&fs->lfs_segpool, PR_WAITOK);
	sp->bpp = pool_get(&fs->lfs_bpppool, PR_WAITOK);
	sp->seg_flags = flags;
	sp->vp = NULL;
	sp->seg_iocount = 0;
	sp->bytes_written = 0;
	sp->gatherblock_loopcount = 0;
	(void) lfs_initseg(fs, 0);

	/*
	 * Keep a cumulative count of the outstanding I/O operations.  If the
	 * disk drive catches up with us it could go to zero before we finish,
	 * so we artificially increment it by one until we've scheduled all of
	 * the writes we intend to do.
	 */
	mutex_enter(&lfs_lock);
	++fs->lfs_iocount;
	fs->lfs_startseg = lfs_sb_getcurseg(fs);
	mutex_exit(&lfs_lock);
	return 0;
}

/*
 * Create a marker inode.
 */
struct inode *
lfs_create_marker(void)
{
	struct inode *marker;

	marker = pool_get(&lfs_inode_pool, PR_WAITOK);
	memset(marker, 0, sizeof(*marker));
	marker->inode_ext.lfs = pool_get(&lfs_inoext_pool, PR_WAITOK);
	memset(marker->inode_ext.lfs, 0, sizeof(*marker->inode_ext.lfs));
	marker->i_state |= IN_MARKER;

	return marker;
}

void
lfs_destroy_marker(struct inode *marker)
{
	pool_put(&lfs_inoext_pool, marker->inode_ext.lfs);
	pool_put(&lfs_inode_pool, marker);
}

static void lfs_unmark_dirop(struct lfs *);

static void
lfs_unmark_dirop(struct lfs *fs)
{
	struct inode *ip, *marker;
	struct vnode *vp;
	int doit;

	KASSERT(fs != NULL);
	ASSERT_NO_SEGLOCK(fs);
	mutex_enter(&lfs_lock);
	doit = !(fs->lfs_flags & LFS_UNDIROP);
	if (doit)
		fs->lfs_flags |= LFS_UNDIROP;
	mutex_exit(&lfs_lock);

	if (!doit)
		return;

	marker = lfs_create_marker();

	mutex_enter(&lfs_lock);
	TAILQ_INSERT_HEAD(&fs->lfs_dchainhd, marker, i_lfs_dchain);
	while ((ip = TAILQ_NEXT(marker, i_lfs_dchain)) != NULL) {
		TAILQ_REMOVE(&fs->lfs_dchainhd, marker, i_lfs_dchain);
		TAILQ_INSERT_AFTER(&fs->lfs_dchainhd, ip, marker,
		    i_lfs_dchain);
		if (ip->i_state & IN_MARKER)
			continue;
		vp = ITOV(ip);
		if ((ip->i_state & (IN_ADIROP | IN_CDIROP)) == IN_CDIROP) {
			--lfs_dirvcount;
			--fs->lfs_dirvcount;
			vp->v_uflag &= ~VU_DIROP;
			TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
			wakeup(&lfs_dirvcount);
			fs->lfs_unlockvp = vp;
			mutex_exit(&lfs_lock);
			vrele(vp);
			mutex_enter(&lfs_lock);
			fs->lfs_unlockvp = NULL;
			ip->i_state &= ~IN_CDIROP;
		}
	}
	TAILQ_REMOVE(&fs->lfs_dchainhd, marker, i_lfs_dchain);
	fs->lfs_flags &= ~LFS_UNDIROP;
	wakeup(&fs->lfs_flags);
	mutex_exit(&lfs_lock);

	lfs_destroy_marker(marker);
}
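
/*
 * Segments whose live byte count has dropped to zero are reclaimed in
 * stages by lfs_auto_segclean() below: DIRTY becomes EMPTY, EMPTY becomes
 * READY, and a READY segment is marked clean only after the pending
 * superblock write has completed, so a segment passes through several
 * checkpoints before it is reused.
 */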

static void
lfs_auto_segclean(struct lfs *fs)
{
	int i, waited, changed;
	SEGUSE *sup;
	struct buf *bp;

	ASSERT_SEGLOCK(fs);
	/*
	 * Now that we've swapped lfs_activesb, but while we still
	 * hold the segment lock, run through the segment list promoting
	 * empty segments.
	 * XXX - do we really need to do them all at once?
	 */
	waited = 0;
	for (i = 0; i < lfs_sb_getnseg(fs); i++) {
		changed = 0;
		LFS_SEGENTRY(sup, fs, i, bp);
		if (sup->su_nbytes == 0) {
			switch (sup->su_flags & (SEGUSE_ACTIVE
						 | SEGUSE_DIRTY
						 | SEGUSE_EMPTY
						 | SEGUSE_READY)) {
			case SEGUSE_DIRTY:
				sup->su_flags |= SEGUSE_EMPTY;
				++changed;
				break;

			case SEGUSE_DIRTY | SEGUSE_EMPTY:
				sup->su_flags |= SEGUSE_READY;
				++changed;
				break;

			case SEGUSE_DIRTY | SEGUSE_EMPTY | SEGUSE_READY:
				/* Make sure the sb is written */
				mutex_enter(&lfs_lock);
				while (waited == 0 && fs->lfs_sbactive)
					mtsleep(&fs->lfs_sbactive, PRIBIO+1,
					    "lfs asb", 0, &lfs_lock);
				mutex_exit(&lfs_lock);
				waited = 1;

				lfs_markclean(fs, i, sup, NOCRED, curlwp);
				++changed;
				break;

			default:
				break;
			}
		}
		if (changed)
			LFS_WRITESEGENTRY(sup, fs, i, bp);
		else
			brelse(bp, 0);
	}
}

/*
 * lfs_segunlock --
 *	Single thread the segment writer.
 */
void
lfs_segunlock(struct lfs *fs)
{
	struct segment *sp;
	unsigned long sync, ckp;
	struct buf *bp;
	int do_unmark_dirop = 0;

	sp = fs->lfs_sp;

	mutex_enter(&lfs_lock);

	if (!LFS_SEGLOCK_HELD(fs))
		panic("lfs seglock not held");

	if (fs->lfs_seglock == 1) {
		if ((sp->seg_flags & (SEGM_PROT | SEGM_CLEAN)) == 0)
			do_unmark_dirop = 1;
		mutex_exit(&lfs_lock);
		sync = sp->seg_flags & SEGM_SYNC;
		ckp = sp->seg_flags & SEGM_CKP;

		/* We should have a segment summary, and nothing else */
		KASSERT(sp->cbpp == sp->bpp + 1);

		/* Free allocated segment summary */
		lfs_sb_suboffset(fs, lfs_btofsb(fs, lfs_sb_getsumsize(fs)));
		bp = *sp->bpp;
		lfs_freebuf(fs, bp);

		pool_put(&fs->lfs_bpppool, sp->bpp);
		sp->bpp = NULL;

		/*
		 * If we're not sync, we're done with sp, get rid of it.
		 * Otherwise, we keep a local copy around but free
		 * fs->lfs_sp so another process can use it (we have to
		 * wait but they don't have to wait for us).
		 */
		if (!sync)
			pool_put(&fs->lfs_segpool, sp);
		fs->lfs_sp = NULL;

		/*
		 * If the I/O count is non-zero, sleep until it reaches zero.
		 * At the moment, the user's process hangs around so we can
		 * sleep.
		 */
		mutex_enter(&lfs_lock);
		if (--fs->lfs_iocount <= 1)
			wakeup(&fs->lfs_iocount);
		mutex_exit(&lfs_lock);

		/*
		 * If we're not checkpointing, we don't have to block
		 * other processes to wait for a synchronous write
		 * to complete.
		 */
		if (!ckp) {
			LFS_ENTER_LOG("segunlock_std", __FILE__, __LINE__, 0, 0, curproc->p_pid);

			mutex_enter(&lfs_lock);
			--fs->lfs_seglock;
			fs->lfs_lockpid = 0;
			fs->lfs_locklwp = 0;
			mutex_exit(&lfs_lock);
			wakeup(&fs->lfs_seglock);
		}
		/*
		 * We let checkpoints happen asynchronously.  That means
		 * that during recovery, we have to roll forward between
		 * the two segments described by the first and second
		 * superblocks to make sure that the checkpoint described
		 * by a superblock completed.
		 */
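		/*
		 * So for a checkpoint that was requested synchronously,
		 * wait here until every write scheduled for it (counted
		 * by lfs_iocount and seg_iocount) has finished before the
		 * superblocks are rewritten below.
		 */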
		mutex_enter(&lfs_lock);
		while (ckp && sync && fs->lfs_iocount) {
			(void)mtsleep(&fs->lfs_iocount, PRIBIO + 1,
			    "lfs_iocount", 0, &lfs_lock);
			DLOG((DLOG_SEG, "sleeping on iocount %p == %d\n", fs, fs->lfs_iocount));
		}
		while (sync && sp->seg_iocount) {
			(void)mtsleep(&sp->seg_iocount, PRIBIO + 1,
			    "seg_iocount", 0, &lfs_lock);
			DLOG((DLOG_SEG, "sleeping on iocount %p == %d\n", sp, sp->seg_iocount));
		}
		mutex_exit(&lfs_lock);
		if (sync)
			pool_put(&fs->lfs_segpool, sp);

		if (ckp) {
			fs->lfs_nactive = 0;
			/* If we *know* everything's on disk, write both sbs */
			/* XXX should wait for this one */
			if (sync)
				lfs_writesuper(fs, lfs_sb_getsboff(fs, fs->lfs_activesb));
			lfs_writesuper(fs, lfs_sb_getsboff(fs, 1 - fs->lfs_activesb));
			if (!(fs->lfs_ivnode->v_mount->mnt_iflag & IMNT_UNMOUNT)) {
				lfs_auto_segclean(fs);
				/* If sync, we can clean the remainder too */
				if (sync)
					lfs_auto_segclean(fs);
			}
			fs->lfs_activesb = 1 - fs->lfs_activesb;

			LFS_ENTER_LOG("segunlock_ckp", __FILE__, __LINE__, 0, 0, curproc->p_pid);

			mutex_enter(&lfs_lock);
			--fs->lfs_seglock;
			fs->lfs_lockpid = 0;
			fs->lfs_locklwp = 0;
			mutex_exit(&lfs_lock);
			wakeup(&fs->lfs_seglock);
		}
		/* Reenable fragment size changes */
		rw_exit(&fs->lfs_fraglock);
		if (do_unmark_dirop)
			lfs_unmark_dirop(fs);
	} else {
		--fs->lfs_seglock;
		KASSERT(fs->lfs_seglock != 0);
		mutex_exit(&lfs_lock);
	}
}

/*
 * Single thread the cleaner.
 */
int
lfs_cleanerlock(struct lfs *fs)
{
	int error = 0;

	mutex_enter(&lfs_lock);
	while (fs->lfs_cleanlock) {
		printf("cleanlock=%p, waiting\n", fs->lfs_cleanlock);
		error = cv_wait_sig(&fs->lfs_cleanercv, &lfs_lock);
		if (error)
			break;
	}
	if (error == 0)
		fs->lfs_cleanlock = curlwp;
	mutex_exit(&lfs_lock);

	return error;
}

/*
 * Check whether we hold the cleaner lock.
 */
int
lfs_cleanerlock_held(struct lfs *fs)
{
	int retval = 0;

	mutex_enter(&lfs_lock);
	retval = (fs->lfs_cleanlock == curlwp);
	mutex_exit(&lfs_lock);

	return retval;
}

/*
 * Release the cleaner lock, clearing the cleaning list first.
 */
void
lfs_cleanerunlock(struct lfs *fs)
{
	struct inode *ip;

	/* Clear out the cleaning list */
	while ((ip = TAILQ_FIRST(&fs->lfs_cleanhd)) != NULL)
		lfs_clrclean(fs, ITOV(ip));

	mutex_enter(&lfs_lock);
	fs->lfs_cleanlock = NULL;
	cv_broadcast(&fs->lfs_cleanercv);
	mutex_exit(&lfs_lock);
}
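
/*
 * Sketch of the expected cleaner-lock protocol (illustrative only, not
 * copied from a particular caller): the cleaner takes the lock, marks
 * the vnodes it intends to work on, and then releases the lock, which
 * also empties the cleaning list:
 *
 *	if ((error = lfs_cleanerlock(fs)) != 0)
 *		return error;
 *	lfs_setclean(fs, vp);
 *	...
 *	lfs_cleanerunlock(fs);
 *
 * Because lfs_cleanerunlock() runs lfs_clrclean() on everything still
 * listed, callers only need lfs_clrclean() for vnodes they want to drop
 * early.
 */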

/*
 * Drain dirops and start writer.
 *
 * No simple_locks are held when we enter and none are held when we return.
 */
void
lfs_writer_enter(struct lfs *fs, const char *wmesg)
{
	int error __diagused;

	ASSERT_NO_SEGLOCK(fs);
	mutex_enter(&lfs_lock);

	/* disallow dirops during flush */
	fs->lfs_writer++;

	while (fs->lfs_dirops > 0) {
		++fs->lfs_diropwait;
		error = mtsleep(&fs->lfs_writer, PRIBIO+1, wmesg, 0,
		    &lfs_lock);
		KASSERT(error == 0);
		--fs->lfs_diropwait;
	}

	mutex_exit(&lfs_lock);
}

int
lfs_writer_tryenter(struct lfs *fs)
{
	int writer_set;

	ASSERT_MAYBE_SEGLOCK(fs);
	mutex_enter(&lfs_lock);
	writer_set = (fs->lfs_dirops == 0);
	if (writer_set)
		fs->lfs_writer++;
	mutex_exit(&lfs_lock);

	return writer_set;
}

void
lfs_writer_leave(struct lfs *fs)
{
	bool dowakeup;

	ASSERT_MAYBE_SEGLOCK(fs);
	mutex_enter(&lfs_lock);
	dowakeup = !(--fs->lfs_writer);
	if (dowakeup)
		cv_broadcast(&fs->lfs_diropscv);
	mutex_exit(&lfs_lock);
}

/*
 * Unlock, wait for the cleaner, then relock to where we were before.
 * To be used only at a fairly high level, to address a paucity of free
 * segments propagated back from lfs_gop_write().
 */
void
lfs_segunlock_relock(struct lfs *fs)
{
	int n = fs->lfs_seglock;
	u_int16_t seg_flags;
	CLEANERINFO *cip;
	struct buf *bp;

	if (n == 0)
		return;

	/* Write anything we've already gathered to disk */
	lfs_writeseg(fs, fs->lfs_sp);

	/* Tell cleaner */
	LFS_CLEANERINFO(cip, fs, bp);
	lfs_ci_setflags(fs, cip,
	    lfs_ci_getflags(fs, cip) | LFS_CLEANER_MUST_CLEAN);
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);

	/* Save segment flags for later */
	seg_flags = fs->lfs_sp->seg_flags;

	fs->lfs_sp->seg_flags |= SEGM_PROT; /* Don't unmark dirop nodes */
	while (fs->lfs_seglock)
		lfs_segunlock(fs);

	/* Wait for the cleaner */
	lfs_wakeup_cleaner(fs);
	mutex_enter(&lfs_lock);
	while (LFS_STARVED_FOR_SEGS(fs))
		mtsleep(&fs->lfs_availsleep, PRIBIO, "relock", 0,
		    &lfs_lock);
	mutex_exit(&lfs_lock);

	/* Put the segment lock back the way it was. */
	while (n--)
		lfs_seglock(fs, seg_flags);

	/* Cleaner can relax now */
	LFS_CLEANERINFO(cip, fs, bp);
	lfs_ci_setflags(fs, cip,
	    lfs_ci_getflags(fs, cip) & ~LFS_CLEANER_MUST_CLEAN);
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);

	return;
}

/*
 * Wake up the cleaner, provided that nowrap is not set.
 */
void
lfs_wakeup_cleaner(struct lfs *fs)
{
	if (fs->lfs_nowrap > 0)
		return;

	cv_broadcast(&fs->lfs_nextsegsleep);
	cv_broadcast(&lfs_allclean_wakeup);
}

/*
 * If it wasn't already on the cleaning list,
 * add it and take a reference.  We will clear
 * the list before dropping the seglock.
 */
void
lfs_setclean(struct lfs *fs, struct vnode *vp)
{
	struct inode *ip;

	KASSERT(lfs_cleanerlock_held(fs));

	vref(vp);

	ip = VTOI(vp);
	mutex_enter(&lfs_lock);
	if (ip->i_state & IN_CLEANING) {
		mutex_exit(&lfs_lock);
		vrele(vp);
		return;
	}

	TAILQ_INSERT_HEAD(&fs->lfs_cleanhd, ip, i_lfs_clean);
	LFS_SET_UINO(VTOI(vp), IN_CLEANING);
	mutex_exit(&lfs_lock);
}
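
/*
 * A vnode on the cleaning list is thus held (via vref) and flagged
 * IN_CLEANING from lfs_setclean() until the matching lfs_clrclean();
 * the reference keeps it from being recycled while the cleaner is
 * still working on its blocks.
 */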

/*
 * Remove a vnode from the cleaning list,
 * clear IN_CLEANING and drop the reference.
 * Find any invalid buffers on the vnode and
 * toss them.
 */
void
lfs_clrclean(struct lfs *fs, struct vnode *vp)
{
	struct inode *ip;

	KASSERT(lfs_cleanerlock_held(fs));

	ip = VTOI(vp);
	mutex_enter(&lfs_lock);
	if (!(ip->i_state & IN_CLEANING)) {
		mutex_exit(&lfs_lock);
		return;
	}
	mutex_exit(&lfs_lock);

	if (vp->v_type == VREG && vp != fs->lfs_ivnode)
		lfs_ungather(fs, NULL, vp, lfs_match_data);

	mutex_enter(&lfs_lock);
	TAILQ_REMOVE(&fs->lfs_cleanhd, ip, i_lfs_clean);
	LFS_CLR_UINO(VTOI(vp), IN_CLEANING);
	mutex_exit(&lfs_lock);
	vrele(vp);
}