1 /* $NetBSD: lfs_subr.c,v 1.105 2025/10/20 04:20:37 perseant Exp $ */ 2 3 /*- 4 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Konrad E. Schroder <perseant (at) hhhh.org>. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 /* 32 * Copyright (c) 1991, 1993 33 * The Regents of the University of California. All rights reserved. 34 * 35 * Redistribution and use in source and binary forms, with or without 36 * modification, are permitted provided that the following conditions 37 * are met: 38 * 1. Redistributions of source code must retain the above copyright 39 * notice, this list of conditions and the following disclaimer. 40 * 2. Redistributions in binary form must reproduce the above copyright 41 * notice, this list of conditions and the following disclaimer in the 42 * documentation and/or other materials provided with the distribution. 43 * 3. Neither the name of the University nor the names of its contributors 44 * may be used to endorse or promote products derived from this software 45 * without specific prior written permission. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 57 * SUCH DAMAGE. 58 * 59 * @(#)lfs_subr.c 8.4 (Berkeley) 5/8/95 60 */ 61 62 #include <sys/cdefs.h> 63 __KERNEL_RCSID(0, "$NetBSD: lfs_subr.c,v 1.105 2025/10/20 04:20:37 perseant Exp $"); 64 65 #include <sys/param.h> 66 #include <sys/systm.h> 67 #include <sys/namei.h> 68 #include <sys/vnode.h> 69 #include <sys/buf.h> 70 #include <sys/mount.h> 71 #include <sys/malloc.h> 72 #include <sys/proc.h> 73 #include <sys/kauth.h> 74 75 #include <ufs/lfs/ulfs_inode.h> 76 #include <ufs/lfs/lfs.h> 77 #include <ufs/lfs/lfs_accessors.h> 78 #include <ufs/lfs/lfs_kernel.h> 79 #include <ufs/lfs/lfs_extern.h> 80 81 #ifdef DEBUG 82 const char *lfs_res_names[LFS_NB_COUNT] = { 83 "summary", 84 "superblock", 85 "file block", 86 "cluster", 87 "clean", 88 "blkiov", 89 }; 90 #endif 91 92 int lfs_res_qty[LFS_NB_COUNT] = { 93 LFS_N_SUMMARIES, 94 LFS_N_SBLOCKS, 95 LFS_N_IBLOCKS, 96 LFS_N_CLUSTERS, 97 LFS_N_CLEAN, 98 LFS_N_BLKIOV, 99 }; 100 101 void 102 lfs_setup_resblks(struct lfs *fs) 103 { 104 int i, j; 105 int maxbpp; 106 107 ASSERT_NO_SEGLOCK(fs); 108 fs->lfs_resblk = malloc(LFS_N_TOTAL * sizeof(res_t), M_SEGMENT, 109 M_WAITOK); 110 for (i = 0; i < LFS_N_TOTAL; i++) { 111 fs->lfs_resblk[i].inuse = 0; 112 fs->lfs_resblk[i].p = NULL; 113 } 114 for (i = 0; i < LFS_RESHASH_WIDTH; i++) 115 LIST_INIT(fs->lfs_reshash + i); 116 117 /* 118 * These types of allocations can be larger than a page, 119 * so we can't use the pool subsystem for them. 120 */ 121 for (i = 0, j = 0; j < LFS_N_SUMMARIES; j++, i++) 122 fs->lfs_resblk[i].size = lfs_sb_getsumsize(fs); 123 for (j = 0; j < LFS_N_SBLOCKS; j++, i++) 124 fs->lfs_resblk[i].size = LFS_SBPAD; 125 for (j = 0; j < LFS_N_IBLOCKS; j++, i++) 126 fs->lfs_resblk[i].size = lfs_sb_getbsize(fs); 127 for (j = 0; j < LFS_N_CLUSTERS; j++, i++) 128 fs->lfs_resblk[i].size = MAXPHYS; 129 for (j = 0; j < LFS_N_CLEAN; j++, i++) 130 fs->lfs_resblk[i].size = MAXPHYS; 131 for (j = 0; j < LFS_N_BLKIOV; j++, i++) 132 fs->lfs_resblk[i].size = LFS_MARKV_MAXBLKCNT * sizeof(BLOCK_INFO); 133 134 for (i = 0; i < LFS_N_TOTAL; i++) { 135 fs->lfs_resblk[i].p = malloc(fs->lfs_resblk[i].size, 136 M_SEGMENT, M_WAITOK); 137 } 138 139 /* 140 * Initialize pools for small types (XXX is BPP small?) 141 */ 142 pool_init(&fs->lfs_clpool, sizeof(struct lfs_cluster), 0, 0, 0, 143 "lfsclpl", &pool_allocator_nointr, IPL_NONE); 144 pool_init(&fs->lfs_segpool, sizeof(struct segment), 0, 0, 0, 145 "lfssegpool", &pool_allocator_nointr, IPL_NONE); 146 /* XXX: should this int32 be 32/64? */ 147 maxbpp = ((lfs_sb_getsumsize(fs) - SEGSUM_SIZE(fs)) / sizeof(int32_t) + 2); 148 maxbpp = MIN(maxbpp, lfs_segsize(fs) / lfs_sb_getfsize(fs) + 2); 149 pool_init(&fs->lfs_bpppool, maxbpp * sizeof(struct buf *), 0, 0, 0, 150 "lfsbpppl", &pool_allocator_nointr, IPL_NONE); 151 } 152 153 void 154 lfs_free_resblks(struct lfs *fs) 155 { 156 int i; 157 158 pool_destroy(&fs->lfs_bpppool); 159 pool_destroy(&fs->lfs_segpool); 160 pool_destroy(&fs->lfs_clpool); 161 162 mutex_enter(&lfs_lock); 163 for (i = 0; i < LFS_N_TOTAL; i++) { 164 while (fs->lfs_resblk[i].inuse) 165 mtsleep(&fs->lfs_resblk, PRIBIO + 1, "lfs_free", 0, 166 &lfs_lock); 167 if (fs->lfs_resblk[i].p != NULL) 168 free(fs->lfs_resblk[i].p, M_SEGMENT); 169 } 170 free(fs->lfs_resblk, M_SEGMENT); 171 mutex_exit(&lfs_lock); 172 } 173 174 static unsigned int 175 lfs_mhash(void *vp) 176 { 177 return (unsigned int)(((unsigned long)vp) >> 2) % LFS_RESHASH_WIDTH; 178 } 179 180 /* 181 * Return memory of the given size for the given purpose, or use one of a 182 * number of spare last-resort buffers, if malloc returns NULL. 183 */ 184 void * 185 lfs_malloc(struct lfs *fs, size_t size, int type) 186 { 187 struct lfs_res_blk *re; 188 void *r; 189 int i, start; 190 unsigned int h; 191 192 ASSERT_MAYBE_SEGLOCK(fs); 193 r = NULL; 194 195 /* If no mem allocated for this type, it just waits */ 196 if (lfs_res_qty[type] == 0) { 197 r = malloc(size, M_SEGMENT, M_WAITOK); 198 return r; 199 } 200 201 /* Otherwise try a quick malloc, and if it works, great */ 202 if ((r = malloc(size, M_SEGMENT, M_NOWAIT)) != NULL) { 203 return r; 204 } 205 206 /* 207 * If malloc returned NULL, we are forced to use one of our 208 * reserve blocks. We have on hand at least one summary block, 209 * at least one cluster block, at least one superblock, 210 * and several indirect blocks. 211 */ 212 213 mutex_enter(&lfs_lock); 214 /* skip over blocks of other types */ 215 for (i = 0, start = 0; i < type; i++) 216 start += lfs_res_qty[i]; 217 while (r == NULL) { 218 for (i = 0; i < lfs_res_qty[type]; i++) { 219 if (fs->lfs_resblk[start + i].inuse == 0) { 220 re = fs->lfs_resblk + start + i; 221 re->inuse = 1; 222 r = re->p; 223 KASSERT(re->size >= size); 224 h = lfs_mhash(r); 225 LIST_INSERT_HEAD(&fs->lfs_reshash[h], re, res); 226 mutex_exit(&lfs_lock); 227 return r; 228 } 229 } 230 DLOG((DLOG_MALLOC, "sleeping on %s (%d)\n", 231 lfs_res_names[type], lfs_res_qty[type])); 232 mtsleep(&fs->lfs_resblk, PVM, "lfs_malloc", 0, 233 &lfs_lock); 234 DLOG((DLOG_MALLOC, "done sleeping on %s\n", 235 lfs_res_names[type])); 236 } 237 /* NOTREACHED */ 238 mutex_exit(&lfs_lock); 239 return r; 240 } 241 242 void 243 lfs_free(struct lfs *fs, void *p, int type) 244 { 245 unsigned int h; 246 res_t *re; 247 248 ASSERT_MAYBE_SEGLOCK(fs); 249 h = lfs_mhash(p); 250 mutex_enter(&lfs_lock); 251 LIST_FOREACH(re, &fs->lfs_reshash[h], res) { 252 if (re->p == p) { 253 KASSERT(re->inuse == 1); 254 LIST_REMOVE(re, res); 255 re->inuse = 0; 256 wakeup(&fs->lfs_resblk); 257 mutex_exit(&lfs_lock); 258 return; 259 } 260 } 261 262 #ifdef notyet /* XXX this assert fires */ 263 for (int i = 0; i < LFS_N_TOTAL; i++) { 264 KDASSERTMSG(fs->lfs_resblk[i].p == p, 265 "lfs_free: inconsistent reserved block"); 266 } 267 #endif 268 269 mutex_exit(&lfs_lock); 270 271 /* 272 * If we didn't find it, free it. 273 */ 274 free(p, M_SEGMENT); 275 } 276 277 /* 278 * lfs_seglock -- 279 * Single thread the segment writer. 280 */ 281 int 282 lfs_seglock(struct lfs *fs, unsigned long flags) 283 { 284 struct segment *sp; 285 286 mutex_enter(&lfs_lock); 287 if (fs->lfs_seglock) { 288 if (fs->lfs_lockpid == curproc->p_pid && 289 fs->lfs_locklwp == curlwp->l_lid) { 290 ++fs->lfs_seglock; 291 fs->lfs_sp->seg_flags |= flags; 292 mutex_exit(&lfs_lock); 293 return 0; 294 } else if (flags & SEGM_PAGEDAEMON) { 295 mutex_exit(&lfs_lock); 296 return EWOULDBLOCK; 297 } else { 298 while (fs->lfs_seglock) { 299 (void)mtsleep(&fs->lfs_seglock, PRIBIO + 1, 300 "lfs_seglock", 0, &lfs_lock); 301 } 302 } 303 } 304 305 fs->lfs_seglock = 1; 306 fs->lfs_lockpid = curproc->p_pid; 307 fs->lfs_locklwp = curlwp->l_lid; 308 mutex_exit(&lfs_lock); 309 fs->lfs_cleanind = 0; 310 311 LFS_ENTER_LOG("seglock", __FILE__, __LINE__, 0, flags, curproc->p_pid); 312 313 /* Drain fragment size changes out */ 314 rw_enter(&fs->lfs_fraglock, RW_WRITER); 315 316 sp = fs->lfs_sp = pool_get(&fs->lfs_segpool, PR_WAITOK); 317 sp->bpp = pool_get(&fs->lfs_bpppool, PR_WAITOK); 318 sp->seg_flags = flags; 319 sp->vp = NULL; 320 sp->seg_iocount = 0; 321 sp->bytes_written = 0; 322 sp->gatherblock_loopcount = 0; 323 (void) lfs_initseg(fs, 0); 324 325 /* 326 * Keep a cumulative count of the outstanding I/O operations. If the 327 * disk drive catches up with us it could go to zero before we finish, 328 * so we artificially increment it by one until we've scheduled all of 329 * the writes we intend to do. 330 */ 331 mutex_enter(&lfs_lock); 332 ++fs->lfs_iocount; 333 fs->lfs_startseg = lfs_sb_getcurseg(fs); 334 mutex_exit(&lfs_lock); 335 return 0; 336 } 337 338 /* 339 * Create a marker inode. 340 */ 341 struct inode * 342 lfs_create_marker(void) 343 { 344 struct inode *marker; 345 346 marker = pool_get(&lfs_inode_pool, PR_WAITOK); 347 memset(marker, 0, sizeof(*marker)); 348 marker->inode_ext.lfs = pool_get(&lfs_inoext_pool, PR_WAITOK); 349 memset(marker->inode_ext.lfs, 0, sizeof(*marker->inode_ext.lfs)); 350 marker->i_state |= IN_MARKER; 351 352 return marker; 353 } 354 355 void 356 lfs_destroy_marker(struct inode *marker) 357 { 358 pool_put(&lfs_inoext_pool, marker->inode_ext.lfs); 359 pool_put(&lfs_inode_pool, marker); 360 } 361 362 static void lfs_unmark_dirop(struct lfs *); 363 364 static void 365 lfs_unmark_dirop(struct lfs *fs) 366 { 367 struct inode *ip, *marker; 368 struct vnode *vp; 369 int doit; 370 371 KASSERT(fs != NULL); 372 ASSERT_NO_SEGLOCK(fs); 373 mutex_enter(&lfs_lock); 374 doit = !(fs->lfs_flags & LFS_UNDIROP); 375 if (doit) 376 fs->lfs_flags |= LFS_UNDIROP; 377 mutex_exit(&lfs_lock); 378 379 if (!doit) 380 return; 381 382 marker = lfs_create_marker(); 383 384 mutex_enter(&lfs_lock); 385 TAILQ_INSERT_HEAD(&fs->lfs_dchainhd, marker, i_lfs_dchain); 386 while ((ip = TAILQ_NEXT(marker, i_lfs_dchain)) != NULL) { 387 TAILQ_REMOVE(&fs->lfs_dchainhd, marker, i_lfs_dchain); 388 TAILQ_INSERT_AFTER(&fs->lfs_dchainhd, ip, marker, 389 i_lfs_dchain); 390 if (ip->i_state & IN_MARKER) 391 continue; 392 vp = ITOV(ip); 393 if ((ip->i_state & (IN_ADIROP | IN_CDIROP)) == IN_CDIROP) { 394 --lfs_dirvcount; 395 --fs->lfs_dirvcount; 396 vp->v_uflag &= ~VU_DIROP; 397 TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain); 398 wakeup(&lfs_dirvcount); 399 fs->lfs_unlockvp = vp; 400 mutex_exit(&lfs_lock); 401 vrele(vp); 402 mutex_enter(&lfs_lock); 403 fs->lfs_unlockvp = NULL; 404 ip->i_state &= ~IN_CDIROP; 405 } 406 } 407 TAILQ_REMOVE(&fs->lfs_dchainhd, marker, i_lfs_dchain); 408 fs->lfs_flags &= ~LFS_UNDIROP; 409 wakeup(&fs->lfs_flags); 410 mutex_exit(&lfs_lock); 411 412 lfs_destroy_marker(marker); 413 } 414 415 static void 416 lfs_auto_segclean(struct lfs *fs) 417 { 418 int i, error, waited; 419 420 ASSERT_SEGLOCK(fs); 421 /* 422 * Now that we've swapped lfs_activesb, but while we still 423 * hold the segment lock, run through the segment list marking 424 * the empty ones clean. 425 * XXX - do we really need to do them all at once? 426 */ 427 waited = 0; 428 for (i = 0; i < lfs_sb_getnseg(fs); i++) { 429 if ((fs->lfs_suflags[0][i] & 430 (SEGUSE_ACTIVE | SEGUSE_DIRTY | SEGUSE_EMPTY)) == 431 (SEGUSE_DIRTY | SEGUSE_EMPTY) && 432 (fs->lfs_suflags[1][i] & 433 (SEGUSE_ACTIVE | SEGUSE_DIRTY | SEGUSE_EMPTY)) == 434 (SEGUSE_DIRTY | SEGUSE_EMPTY)) { 435 436 /* Make sure the sb is written before we clean */ 437 mutex_enter(&lfs_lock); 438 while (waited == 0 && fs->lfs_sbactive) 439 mtsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs asb", 440 0, &lfs_lock); 441 mutex_exit(&lfs_lock); 442 waited = 1; 443 444 if ((error = lfs_do_segclean(fs, i, curlwp->l_cred, 445 curlwp)) != 0) { 446 DLOG((DLOG_CLEAN, "lfs_auto_segclean: lfs_do_segclean returned %d for seg %d\n", error, i)); 447 } 448 } 449 fs->lfs_suflags[1 - fs->lfs_activesb][i] = 450 fs->lfs_suflags[fs->lfs_activesb][i]; 451 } 452 } 453 454 /* 455 * lfs_segunlock -- 456 * Single thread the segment writer. 457 */ 458 void 459 lfs_segunlock(struct lfs *fs) 460 { 461 struct segment *sp; 462 unsigned long sync, ckp; 463 struct buf *bp; 464 int do_unmark_dirop = 0; 465 466 sp = fs->lfs_sp; 467 468 mutex_enter(&lfs_lock); 469 470 if (!LFS_SEGLOCK_HELD(fs)) 471 panic("lfs seglock not held"); 472 473 if (fs->lfs_seglock == 1) { 474 if ((sp->seg_flags & (SEGM_PROT | SEGM_CLEAN)) == 0) 475 do_unmark_dirop = 1; 476 mutex_exit(&lfs_lock); 477 sync = sp->seg_flags & SEGM_SYNC; 478 ckp = sp->seg_flags & SEGM_CKP; 479 480 /* We should have a segment summary, and nothing else */ 481 KASSERT(sp->cbpp == sp->bpp + 1); 482 483 /* Free allocated segment summary */ 484 lfs_sb_suboffset(fs, lfs_btofsb(fs, lfs_sb_getsumsize(fs))); 485 bp = *sp->bpp; 486 lfs_freebuf(fs, bp); 487 488 pool_put(&fs->lfs_bpppool, sp->bpp); 489 sp->bpp = NULL; 490 491 /* 492 * If we're not sync, we're done with sp, get rid of it. 493 * Otherwise, we keep a local copy around but free 494 * fs->lfs_sp so another process can use it (we have to 495 * wait but they don't have to wait for us). 496 */ 497 if (!sync) 498 pool_put(&fs->lfs_segpool, sp); 499 fs->lfs_sp = NULL; 500 501 /* 502 * If the I/O count is non-zero, sleep until it reaches zero. 503 * At the moment, the user's process hangs around so we can 504 * sleep. 505 */ 506 mutex_enter(&lfs_lock); 507 if (--fs->lfs_iocount <= 1) 508 wakeup(&fs->lfs_iocount); 509 mutex_exit(&lfs_lock); 510 511 /* 512 * If we're not checkpointing, we don't have to block 513 * other processes to wait for a synchronous write 514 * to complete. 515 */ 516 if (!ckp) { 517 LFS_ENTER_LOG("segunlock_std", __FILE__, __LINE__, 0, 0, curproc->p_pid); 518 519 mutex_enter(&lfs_lock); 520 --fs->lfs_seglock; 521 fs->lfs_lockpid = 0; 522 fs->lfs_locklwp = 0; 523 mutex_exit(&lfs_lock); 524 wakeup(&fs->lfs_seglock); 525 } 526 /* 527 * We let checkpoints happen asynchronously. That means 528 * that during recovery, we have to roll forward between 529 * the two segments described by the first and second 530 * superblocks to make sure that the checkpoint described 531 * by a superblock completed. 532 */ 533 mutex_enter(&lfs_lock); 534 while (ckp && sync && fs->lfs_iocount) { 535 (void)mtsleep(&fs->lfs_iocount, PRIBIO + 1, 536 "lfs_iocount", 0, &lfs_lock); 537 DLOG((DLOG_SEG, "sleeping on iocount %x == %d\n", fs, fs->lfs_iocount)); 538 } 539 while (sync && sp->seg_iocount) { 540 (void)mtsleep(&sp->seg_iocount, PRIBIO + 1, 541 "seg_iocount", 0, &lfs_lock); 542 DLOG((DLOG_SEG, "sleeping on iocount %x == %d\n", sp, sp->seg_iocount)); 543 } 544 mutex_exit(&lfs_lock); 545 if (sync) 546 pool_put(&fs->lfs_segpool, sp); 547 548 if (ckp) { 549 fs->lfs_nactive = 0; 550 /* If we *know* everything's on disk, write both sbs */ 551 /* XXX should wait for this one */ 552 if (sync) 553 lfs_writesuper(fs, lfs_sb_getsboff(fs, fs->lfs_activesb)); 554 lfs_writesuper(fs, lfs_sb_getsboff(fs, 1 - fs->lfs_activesb)); 555 if (!(fs->lfs_ivnode->v_mount->mnt_iflag & IMNT_UNMOUNT)) { 556 lfs_auto_segclean(fs); 557 /* If sync, we can clean the remainder too */ 558 if (sync) 559 lfs_auto_segclean(fs); 560 } 561 fs->lfs_activesb = 1 - fs->lfs_activesb; 562 563 LFS_ENTER_LOG("segunlock_ckp", __FILE__, __LINE__, 0, 0, curproc->p_pid); 564 565 mutex_enter(&lfs_lock); 566 --fs->lfs_seglock; 567 fs->lfs_lockpid = 0; 568 fs->lfs_locklwp = 0; 569 mutex_exit(&lfs_lock); 570 wakeup(&fs->lfs_seglock); 571 } 572 /* Reenable fragment size changes */ 573 rw_exit(&fs->lfs_fraglock); 574 if (do_unmark_dirop) 575 lfs_unmark_dirop(fs); 576 } else { 577 --fs->lfs_seglock; 578 KASSERT(fs->lfs_seglock != 0); 579 mutex_exit(&lfs_lock); 580 } 581 } 582 583 /* 584 * Single thread the cleaner. 585 */ 586 int 587 lfs_cleanerlock(struct lfs *fs) 588 { 589 int error; 590 591 mutex_enter(&lfs_lock); 592 while (fs->lfs_cleanlock) { 593 printf("cleanlock=%p, waiting\n", fs->lfs_cleanlock); 594 error = cv_wait_sig(&fs->lfs_cleanercv, &lfs_lock); 595 if (error) 596 break; 597 } 598 if (error == 0) 599 fs->lfs_cleanlock = curlwp; 600 mutex_exit(&lfs_lock); 601 602 return error; 603 } 604 605 /* 606 * Check whether we hold the cleaner lock. 607 */ 608 int 609 lfs_cleanerlock_held(struct lfs *fs) 610 { 611 int retval = 0; 612 613 mutex_enter(&lfs_lock); 614 retval = (fs->lfs_cleanlock == curlwp); 615 mutex_exit(&lfs_lock); 616 617 return retval; 618 } 619 620 /* 621 * Single thread the cleaner. 622 */ 623 void 624 lfs_cleanerunlock(struct lfs *fs) 625 { 626 struct inode *ip; 627 628 /* Clear out the cleaning list */ 629 while ((ip = TAILQ_FIRST(&fs->lfs_cleanhd)) != NULL) 630 lfs_clrclean(fs, ITOV(ip)); 631 632 mutex_enter(&lfs_lock); 633 fs->lfs_cleanlock = 0x0; 634 cv_broadcast(&fs->lfs_cleanercv); 635 mutex_exit(&lfs_lock); 636 } 637 638 /* 639 * Drain dirops and start writer. 640 * 641 * No simple_locks are held when we enter and none are held when we return. 642 */ 643 void 644 lfs_writer_enter(struct lfs *fs, const char *wmesg) 645 { 646 int error __diagused; 647 648 ASSERT_NO_SEGLOCK(fs); 649 mutex_enter(&lfs_lock); 650 651 /* disallow dirops during flush */ 652 fs->lfs_writer++; 653 654 while (fs->lfs_dirops > 0) { 655 ++fs->lfs_diropwait; 656 error = mtsleep(&fs->lfs_writer, PRIBIO+1, wmesg, 0, 657 &lfs_lock); 658 KASSERT(error == 0); 659 --fs->lfs_diropwait; 660 } 661 662 mutex_exit(&lfs_lock); 663 } 664 665 int 666 lfs_writer_tryenter(struct lfs *fs) 667 { 668 int writer_set; 669 670 ASSERT_MAYBE_SEGLOCK(fs); 671 mutex_enter(&lfs_lock); 672 writer_set = (fs->lfs_dirops == 0); 673 if (writer_set) 674 fs->lfs_writer++; 675 mutex_exit(&lfs_lock); 676 677 return writer_set; 678 } 679 680 void 681 lfs_writer_leave(struct lfs *fs) 682 { 683 bool dowakeup; 684 685 ASSERT_MAYBE_SEGLOCK(fs); 686 mutex_enter(&lfs_lock); 687 dowakeup = !(--fs->lfs_writer); 688 if (dowakeup) 689 cv_broadcast(&fs->lfs_diropscv); 690 mutex_exit(&lfs_lock); 691 } 692 693 /* 694 * Unlock, wait for the cleaner, then relock to where we were before. 695 * To be used only at a fairly high level, to address a paucity of free 696 * segments propagated back from lfs_gop_write(). 697 */ 698 void 699 lfs_segunlock_relock(struct lfs *fs) 700 { 701 int n = fs->lfs_seglock; 702 u_int16_t seg_flags; 703 CLEANERINFO *cip; 704 struct buf *bp; 705 706 if (n == 0) 707 return; 708 709 /* Write anything we've already gathered to disk */ 710 lfs_writeseg(fs, fs->lfs_sp); 711 712 /* Tell cleaner */ 713 LFS_CLEANERINFO(cip, fs, bp); 714 lfs_ci_setflags(fs, cip, 715 lfs_ci_getflags(fs, cip) | LFS_CLEANER_MUST_CLEAN); 716 LFS_SYNC_CLEANERINFO(cip, fs, bp, 1); 717 718 /* Save segment flags for later */ 719 seg_flags = fs->lfs_sp->seg_flags; 720 721 fs->lfs_sp->seg_flags |= SEGM_PROT; /* Don't unmark dirop nodes */ 722 while(fs->lfs_seglock) 723 lfs_segunlock(fs); 724 725 /* Wait for the cleaner */ 726 lfs_wakeup_cleaner(fs); 727 mutex_enter(&lfs_lock); 728 while (LFS_STARVED_FOR_SEGS(fs)) 729 mtsleep(&fs->lfs_availsleep, PRIBIO, "relock", 0, 730 &lfs_lock); 731 mutex_exit(&lfs_lock); 732 733 /* Put the segment lock back the way it was. */ 734 while(n--) 735 lfs_seglock(fs, seg_flags); 736 737 /* Cleaner can relax now */ 738 LFS_CLEANERINFO(cip, fs, bp); 739 lfs_ci_setflags(fs, cip, 740 lfs_ci_getflags(fs, cip) & ~LFS_CLEANER_MUST_CLEAN); 741 LFS_SYNC_CLEANERINFO(cip, fs, bp, 1); 742 743 return; 744 } 745 746 /* 747 * Wake up the cleaner, provided that nowrap is not set. 748 */ 749 void 750 lfs_wakeup_cleaner(struct lfs *fs) 751 { 752 if (fs->lfs_nowrap > 0) 753 return; 754 755 cv_broadcast(&fs->lfs_nextsegsleep); 756 cv_broadcast(&lfs_allclean_wakeup); 757 } 758 759 /* 760 * If it wasn't already on the cleaning list, 761 * add it and take a reference. We will clear 762 * the list before dropping the seglock. 763 */ 764 void 765 lfs_setclean(struct lfs *fs, struct vnode *vp) 766 { 767 struct inode *ip; 768 769 KASSERT(lfs_cleanerlock_held(fs)); 770 771 ip = VTOI(vp); 772 if (ip->i_state & IN_CLEANING) 773 return; 774 775 vref(vp); 776 TAILQ_INSERT_HEAD(&fs->lfs_cleanhd, ip, i_lfs_clean); 777 LFS_SET_UINO(VTOI(vp), IN_CLEANING); 778 } 779 780 /* 781 * Remove a vnode from the cleaning list, 782 * clear IN_CLEANING and drop the reference. 783 * Find any invalid buffers on the vnode and 784 * toss them. 785 */ 786 void 787 lfs_clrclean(struct lfs *fs, struct vnode *vp) 788 { 789 struct inode *ip; 790 791 KASSERT(lfs_cleanerlock_held(fs)); 792 793 ip = VTOI(vp); 794 if (!(ip->i_state & IN_CLEANING)) 795 return; 796 797 if (vp->v_type == VREG && vp != fs->lfs_ivnode) 798 lfs_ungather(fs, NULL, vp, lfs_match_data); 799 800 TAILQ_REMOVE(&fs->lfs_cleanhd, ip, i_lfs_clean); 801 LFS_CLR_UINO(VTOI(vp), IN_CLEANING); 802 vrele(vp); 803 } 804