Lines Matching defs:chmp (CHFS garbage-collector code)
69 chfs_gc_trigger(struct chfs_mount *chmp)
71 struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
74 chfs_gc_thread_should_wake(chmp)) {
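A minimal reconstruction of how the chfs_gc_trigger() fragments above likely fit together, assuming the usual NetBSD kernel and CHFS internal headers; the gcth_running check and the cv_signal() call are assumptions, only the gc pointer setup and the chfs_gc_thread_should_wake() test appear in the listing.

void
chfs_gc_trigger(struct chfs_mount *chmp)
{
    struct garbage_collector_thread *gc = &chmp->chm_gc_thread;

    /* Wake the collector only when it is running and has work to do. */
    if (gc->gcth_running &&
        chfs_gc_thread_should_wake(chmp))
        cv_signal(&gc->gcth_wakeup);
}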
84 struct chfs_mount *chmp = data;
85 struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
89 mutex_enter(&chmp->chm_lock_mountfields);
95 if (chfs_gc_thread_should_wake(chmp)) {
96 if (chfs_gcollect_pass(chmp) == ENOSPC) {
97 mutex_exit(&chmp->chm_lock_mountfields);
108 &chmp->chm_lock_mountfields, mstohz(100));
110 mutex_exit(&chmp->chm_lock_mountfields);
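The chfs_gc_thread() fragments above outline the collector's main loop: take chm_lock_mountfields, run a collection pass whenever the wake heuristic says so, and otherwise sleep about 100 ms or until chfs_gc_trigger() signals the condvar. A hedged sketch of that loop follows; the gcth_running flag, cv_timedwait_sig(), the panic on ENOSPC and kthread_exit() do not appear in the listing and are assumptions.

void
chfs_gc_thread(void *data)
{
    struct chfs_mount *chmp = data;
    struct garbage_collector_thread *gc = &chmp->chm_gc_thread;

    mutex_enter(&chmp->chm_lock_mountfields);
    while (gc->gcth_running) {
        /* The wake heuristic must be evaluated with
         * chm_lock_mountfields held. */
        if (chfs_gc_thread_should_wake(chmp)) {
            if (chfs_gcollect_pass(chmp) == ENOSPC) {
                /* Nothing can be reclaimed any more. */
                mutex_exit(&chmp->chm_lock_mountfields);
                panic("chfs_gc_thread: no space left for GC");
            }
        }
        /* Sleep ~100 ms, or until chfs_gc_trigger() signals us. */
        cv_timedwait_sig(&gc->gcth_wakeup,
            &chmp->chm_lock_mountfields, mstohz(100));
    }
    mutex_exit(&chmp->chm_lock_mountfields);
    kthread_exit(0);
}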
118 chfs_gc_thread_start(struct chfs_mount *chmp)
120 struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
126 NULL, chfs_gc_thread, chmp, &gc->gcth_thread,
132 chfs_gc_thread_stop(struct chfs_mount *chmp)
134 struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
159 chfs_gc_thread_should_wake(struct chfs_mount *chmp)
165 KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
168 if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
174 if (chmp->chm_unchecked_size) {
179 dirty = chmp->chm_dirty_size - chmp->chm_nr_erasable_blocks *
180 chmp->chm_ebh->eb_size;
183 if (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks <
184 chmp->chm_resv_blocks_gctrigger && (dirty > chmp->chm_nospc_dirty)) {
186 chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks,
187 chmp->chm_resv_blocks_gctrigger);
189 dirty, chmp->chm_nospc_dirty);
195 TAILQ_FOREACH(cheb, &chmp->chm_very_dirty_queue, queue) {
197 if (nr_very_dirty == chmp->chm_vdirty_blocks_gctrigger) {
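Read together, the chfs_gc_thread_should_wake() fragments above give the wake-up heuristic: collect if erases are already pending, if unchecked nodes remain from mount, if free plus erasable blocks drop below the GC reserve while reclaimable dirt exceeds the no-space threshold, or if enough very dirty blocks have piled up. A hedged condensation follows; the return type, the return points and the local variable types are assumptions.

int
chfs_gc_thread_should_wake(struct chfs_mount *chmp)
{
    struct chfs_eraseblock *cheb;
    uint32_t dirty;
    int nr_very_dirty = 0;

    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

    /* Blocks already queued for erase always justify a pass. */
    if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue))
        return 1;

    /* Nodes left unchecked at mount time still need to be examined. */
    if (chmp->chm_unchecked_size)
        return 1;

    /* Dirty space not already accounted to fully erasable blocks. */
    dirty = chmp->chm_dirty_size - chmp->chm_nr_erasable_blocks *
        chmp->chm_ebh->eb_size;

    /* Low on free space while there is enough dirt worth reclaiming. */
    if (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks <
        chmp->chm_resv_blocks_gctrigger && dirty > chmp->chm_nospc_dirty)
        return 1;

    /* Too many very dirty blocks have accumulated. */
    TAILQ_FOREACH(cheb, &chmp->chm_very_dirty_queue, queue) {
        nr_very_dirty++;
        if (nr_very_dirty == chmp->chm_vdirty_blocks_gctrigger)
            return 1;
    }

    return 0;
}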
209 chfs_gc_release_inode(struct chfs_mount *chmp,
217 chfs_gc_fetch_inode(struct chfs_mount *chmp, ino_t vno,
227 vp = chfs_vnode_lookup(chmp, vno);
229 mutex_enter(&chmp->chm_lock_vnocache);
230 vc = chfs_vnode_cache_get(chmp, vno);
232 mutex_exit(&chmp->chm_lock_vnocache);
235 mutex_exit(&chmp->chm_lock_vnocache);
238 KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
240 &chmp->chm_gc_thread.gcth_wakeup,
241 &chmp->chm_lock_mountfields, mstohz(50));
247 vp = chfs_vnode_lookup(chmp, vno);
261 chfs_check(struct chfs_mount *chmp, struct chfs_vnode_cache *chvc)
263 KASSERT(mutex_owned(&chmp->chm_lock_vnocache));
287 mutex_exit(&chmp->chm_lock_vnocache);
288 ret = chfs_read_inode_internal(chmp, ip);
289 mutex_enter(&chmp->chm_lock_vnocache);
291 chfs_clear_inode(chmp, ip);
302 chfs_clear_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
304 KASSERT(mutex_owned(&chmp->chm_lock_vnocache));
323 chfs_remove_and_obsolete(chmp, chvc, nref, &chvc->v);
327 chfs_kill_fragtree(chmp, &ip->fragtree);
340 chfs_vnode_cache_remove(chmp, chvc);
346 find_gc_block(struct chfs_mount *chmp)
351 KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
358 if (n<50 && !TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
360 nextqueue = &chmp->chm_erase_pending_queue;
361 } else if (n<110 && !TAILQ_EMPTY(&chmp->chm_very_dirty_queue) ) {
363 nextqueue = &chmp->chm_very_dirty_queue;
364 } else if (n<126 && !TAILQ_EMPTY(&chmp->chm_dirty_queue) ) {
366 nextqueue = &chmp->chm_dirty_queue;
367 } else if (!TAILQ_EMPTY(&chmp->chm_clean_queue)) {
369 nextqueue = &chmp->chm_clean_queue;
370 } else if (!TAILQ_EMPTY(&chmp->chm_dirty_queue)) {
373 nextqueue = &chmp->chm_dirty_queue;
374 } else if (!TAILQ_EMPTY(&chmp->chm_very_dirty_queue)) {
377 nextqueue = &chmp->chm_very_dirty_queue;
378 } else if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
381 nextqueue = &chmp->chm_erase_pending_queue;
382 } else if (!TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue)) {
385 rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
386 chfs_flush_pending_wbuf(chmp);
387 rw_exit(&chmp->chm_lock_wbuf);
397 if (chmp->chm_nextblock) {
399 chmp->chm_nextblock->lnr, ret->lnr);
400 if (ret == chmp->chm_nextblock)
406 chmp->chm_gcblock = ret;
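find_gc_block() chooses the next eraseblock to collect. The thresholds in the fragments above suggest a weighted random pick: a value n (apparently drawn in [0,128)) is compared against 50, 110 and 126 so that erase-pending blocks are preferred, then very dirty, then dirty, then clean, with non-empty fallbacks in the reverse order, and a final case that flushes the write buffer when only chm_erasable_pending_wbuf_queue has entries. A simplified sketch of just the selection step, assuming a cprng_fast32() draw and a TAILQ_FIRST peek; the helper name is hypothetical, and unlike the listed code it neither dequeues the chosen block nor updates chm_gcblock or compares against chm_nextblock.

static struct chfs_eraseblock *
pick_gc_queue_head(struct chfs_mount *chmp)    /* hypothetical helper */
{
    uint32_t n = cprng_fast32() % 128;

    if (n < 50 && !TAILQ_EMPTY(&chmp->chm_erase_pending_queue))
        return TAILQ_FIRST(&chmp->chm_erase_pending_queue);
    if (n < 110 && !TAILQ_EMPTY(&chmp->chm_very_dirty_queue))
        return TAILQ_FIRST(&chmp->chm_very_dirty_queue);
    if (n < 126 && !TAILQ_EMPTY(&chmp->chm_dirty_queue))
        return TAILQ_FIRST(&chmp->chm_dirty_queue);
    if (!TAILQ_EMPTY(&chmp->chm_clean_queue))
        return TAILQ_FIRST(&chmp->chm_clean_queue);
    if (!TAILQ_EMPTY(&chmp->chm_dirty_queue))
        return TAILQ_FIRST(&chmp->chm_dirty_queue);
    if (!TAILQ_EMPTY(&chmp->chm_very_dirty_queue))
        return TAILQ_FIRST(&chmp->chm_very_dirty_queue);
    if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue))
        return TAILQ_FIRST(&chmp->chm_erase_pending_queue);

    /* Only blocks waiting on the write buffer are left: the listed
     * code flushes it under chm_lock_wbuf and retries. */
    return NULL;
}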
421 chfs_gcollect_pass(struct chfs_mount *chmp)
432 KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
436 mutex_enter(&chmp->chm_lock_sizes);
439 dbg_gc("unchecked size == %u\n", chmp->chm_unchecked_size);
440 if (!chmp->chm_unchecked_size)
444 if (chmp->chm_checked_vno > chmp->chm_max_vno) {
445 mutex_exit(&chmp->chm_lock_sizes);
447 (unsigned long long)chmp->chm_checked_vno,
448 (unsigned long long)chmp->chm_max_vno);
452 mutex_exit(&chmp->chm_lock_sizes);
454 mutex_enter(&chmp->chm_lock_vnocache);
456 (unsigned long long)chmp->chm_checked_vno);
460 vc = chfs_vnode_cache_get(chmp, chmp->chm_checked_vno++);
464 mutex_exit(&chmp->chm_lock_vnocache);
470 mutex_exit(&chmp->chm_lock_vnocache);
480 mutex_exit(&chmp->chm_lock_vnocache);
486 mutex_exit(&chmp->chm_lock_vnocache);
491 chmp->chm_checked_vno--;
492 mutex_exit(&chmp->chm_lock_vnocache);
499 mutex_exit(&chmp->chm_lock_vnocache);
515 ret = chfs_check(chmp, vc);
518 mutex_exit(&chmp->chm_lock_vnocache);
523 eb = chmp->chm_gcblock;
526 eb = find_gc_block(chmp);
531 if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
532 mutex_exit(&chmp->chm_lock_sizes);
535 mutex_exit(&chmp->chm_lock_sizes);
552 if (nref == chmp->chm_blocks[nref->nref_lnr].last_node) {
559 mutex_exit(&chmp->chm_lock_sizes);
566 KASSERT(nref->nref_lnr == chmp->chm_gcblock->lnr);
571 mutex_exit(&chmp->chm_lock_sizes);
573 chfs_gcollect_pristine(chmp, eb, NULL, nref);
575 chfs_mark_node_obsolete(chmp, nref);
580 mutex_exit(&chmp->chm_lock_sizes);
582 mutex_enter(&chmp->chm_lock_vnocache);
604 mutex_exit(&chmp->chm_lock_vnocache);
611 mutex_exit(&chmp->chm_lock_vnocache);
619 mutex_exit(&chmp->chm_lock_vnocache);
620 ret = chfs_gcollect_pristine(chmp, eb, NULL, nref);
622 //TODO wake_up(&chmp->chm_vnocache_wq);
625 mutex_enter(&chmp->chm_lock_vnocache);
632 mutex_exit(&chmp->chm_lock_vnocache);
634 ip = chfs_gc_fetch_inode(chmp, vno, !(pvno | nlink));
642 chfs_gcollect_live(chmp, eb, nref, ip);
644 chfs_gc_release_inode(chmp, ip);
656 KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
657 mutex_enter(&chmp->chm_lock_sizes);
661 if (chmp->chm_gcblock) {
663 dbg_gc("eb used size = %u\n", chmp->chm_gcblock->used_size);
664 dbg_gc("eb free size = %u\n", chmp->chm_gcblock->free_size);
665 dbg_gc("eb dirty size = %u\n", chmp->chm_gcblock->dirty_size);
667 chmp->chm_gcblock->unchecked_size);
668 dbg_gc("eb wasted size = %u\n", chmp->chm_gcblock->wasted_size);
670 KASSERT(chmp->chm_gcblock->used_size + chmp->chm_gcblock->free_size +
671 chmp->chm_gcblock->dirty_size +
672 chmp->chm_gcblock->unchecked_size +
673 chmp->chm_gcblock->wasted_size == chmp->chm_ebh->eb_size);
678 if (chmp->chm_gcblock && chmp->chm_gcblock->dirty_size +
679 chmp->chm_gcblock->wasted_size == chmp->chm_ebh->eb_size) {
681 "Moving to erase_pending_queue\n", chmp->chm_gcblock->lnr);
682 TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
683 chmp->chm_gcblock, queue);
684 chmp->chm_gcblock = NULL;
685 chmp->chm_nr_erasable_blocks++;
686 if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
687 ret = chfs_remap_leb(chmp);
691 mutex_exit(&chmp->chm_lock_sizes);
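At the end of a pass, chfs_gcollect_pass() sanity-checks the per-eraseblock size accounting and retires the GC block once all of it is dirty or wasted. A hedged condensation of that bookkeeping, run under chm_lock_sizes as in the fragments above; the ret declaration and the surrounding error handling are simplified.

int ret;

if (chmp->chm_gcblock) {
    /* The five size classes of an eraseblock must add up exactly. */
    KASSERT(chmp->chm_gcblock->used_size + chmp->chm_gcblock->free_size +
        chmp->chm_gcblock->dirty_size +
        chmp->chm_gcblock->unchecked_size +
        chmp->chm_gcblock->wasted_size == chmp->chm_ebh->eb_size);
}

if (chmp->chm_gcblock && chmp->chm_gcblock->dirty_size +
    chmp->chm_gcblock->wasted_size == chmp->chm_ebh->eb_size) {
    /* Nothing live remains: queue the block for erasure. */
    TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
        chmp->chm_gcblock, queue);
    chmp->chm_gcblock = NULL;
    chmp->chm_nr_erasable_blocks++;
    if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue))
        ret = chfs_remap_leb(chmp);
}
mutex_exit(&chmp->chm_lock_sizes);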
699 chfs_gcollect_pristine(struct chfs_mount *chmp, struct chfs_eraseblock *cheb,
709 size_t totlen = chfs_nref_len(chmp, cheb, nref);
720 ret = chfs_read_leb(chmp, nref->nref_lnr, data, ofs, totlen, &retlen);
793 ret = chfs_reserve_space_gc(chmp, totlen);
797 newnref = chfs_alloc_node_ref(chmp->chm_nextblock);
803 ofs = chmp->chm_ebh->eb_size - chmp->chm_nextblock->free_size;
809 mutex_enter(&chmp->chm_lock_sizes);
810 ret = chfs_write_wbuf(chmp, &vec, 1, ofs, &retlen);
818 chfs_change_size_dirty(chmp, chmp->chm_nextblock, totlen);
820 mutex_exit(&chmp->chm_lock_sizes);
827 mutex_exit(&chmp->chm_lock_sizes);
832 mutex_exit(&chmp->chm_lock_sizes);
834 mutex_enter(&chmp->chm_lock_vnocache);
835 chfs_add_vnode_ref_to_vc(chmp, chvc, newnref);
836 mutex_exit(&chmp->chm_lock_vnocache);
847 chfs_gcollect_live(struct chfs_mount *chmp,
860 if (chmp->chm_gcblock != cheb) {
872 chfs_gcollect_vnode(chmp, ip);
893 ret = chfs_gcollect_pristine(chmp,
901 ret = chfs_gcollect_dnode(chmp, cheb, ip, fn, start, end);
917 ret = chfs_gcollect_dirent(chmp, cheb, ip, fd);
920 ret = chfs_gcollect_deletion_dirent(chmp, cheb, ip, fd);
938 chfs_gcollect_vnode(struct chfs_mount *chmp, struct chfs_inode *ip)
945 ret = chfs_write_flash_vnode(chmp, ip, ALLOC_GC);
952 chfs_gcollect_dirent(struct chfs_mount *chmp,
961 vnode = chfs_vnode_lookup(chmp, fd->vno);
972 mutex_enter(&chmp->chm_lock_vnocache);
973 chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
975 mutex_exit(&chmp->chm_lock_vnocache);
978 return chfs_write_flash_dirent(chmp,
987 chfs_gcollect_deletion_dirent(struct chfs_mount *chmp,
1004 nref_len = chfs_nref_len(chmp, cheb, fd->nref);
1006 /* XXX This was a noop (void)chfs_vnode_lookup(chmp, fd->vno); */
1017 if (chfs_nref_len(chmp, NULL, nref) != nref_len)
1026 ret = chfs_read_leb(chmp,
1059 mutex_enter(&chmp->chm_lock_vnocache);
1060 chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
1062 mutex_exit(&chmp->chm_lock_vnocache);
1063 return chfs_write_flash_dirent(chmp,
1075 chfs_gcollect_dnode(struct chfs_mount *chmp,
1092 totlen = chfs_nref_len(chmp, orig_cheb, fn->nref);
1096 ret = chfs_read_leb(chmp, fn->nref->nref_lnr, data, fn->nref->nref_offset,
1109 ret = chfs_reserve_space_gc(chmp, totlen);
1113 nref = chfs_alloc_node_ref(chmp->chm_nextblock);
1119 mutex_enter(&chmp->chm_lock_sizes);
1121 nref->nref_offset = chmp->chm_ebh->eb_size - chmp->chm_nextblock->free_size;
1123 chfs_change_size_free(chmp, chmp->chm_nextblock, -totlen);
1126 ret = chfs_write_wbuf(chmp, &vec, 1, nref->nref_offset, &retlen);
1132 chfs_change_size_dirty(chmp, chmp->chm_nextblock, totlen);
1135 mutex_exit(&chmp->chm_lock_sizes);
1141 mutex_exit(&chmp->chm_lock_sizes);
1147 chfs_change_size_used(chmp, &chmp->chm_blocks[nref->nref_lnr], totlen);
1148 mutex_exit(&chmp->chm_lock_sizes);
1149 KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);
1158 mutex_enter(&chmp->chm_lock_vnocache);
1160 chfs_remove_frags_of_node(chmp, &ip->fragtree, fn->nref);
1161 chfs_remove_and_obsolete(chmp, ip->chvc, fn->nref, &ip->chvc->dnode);
1164 chfs_add_full_dnode_to_inode(chmp, ip, newfn);
1165 chfs_add_node_to_list(chmp,
1167 mutex_exit(&chmp->chm_lock_vnocache);
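Pulling the chfs_gcollect_dnode() fragments together, moving a live data node follows a read, reserve, write, re-account, relink pattern. The sketch below condenses it, assuming kmem_alloc()/kmem_free() for the staging buffer and the usual struct iovec setup; variable types are guesses, error paths are collapsed, and the construction of the replacement full dnode is only described in the closing comment.

uint32_t totlen;
size_t retlen;
char *data;
struct chfs_node_ref *nref;
struct iovec vec;
int ret;

/* Read the live node out of the eraseblock being collected. */
totlen = chfs_nref_len(chmp, orig_cheb, fn->nref);
data = kmem_alloc(totlen, KM_SLEEP);
ret = chfs_read_leb(chmp, fn->nref->nref_lnr, data,
    fn->nref->nref_offset, totlen, &retlen);

/* Reserve space for GC and bind a new node reference to the current
 * nextblock; its offset is the block's current write point. */
ret = chfs_reserve_space_gc(chmp, totlen);
nref = chfs_alloc_node_ref(chmp->chm_nextblock);

mutex_enter(&chmp->chm_lock_sizes);
nref->nref_offset = chmp->chm_ebh->eb_size - chmp->chm_nextblock->free_size;
chfs_change_size_free(chmp, chmp->chm_nextblock, -totlen);

/* Copy the node through the write buffer; a failed or short write
 * turns the reserved space into dirty space instead of used space. */
vec.iov_base = data;
vec.iov_len = totlen;
ret = chfs_write_wbuf(chmp, &vec, 1, nref->nref_offset, &retlen);
if (ret || retlen != totlen)
    chfs_change_size_dirty(chmp, chmp->chm_nextblock, totlen);
else
    chfs_change_size_used(chmp,
        &chmp->chm_blocks[nref->nref_lnr], totlen);
mutex_exit(&chmp->chm_lock_sizes);

/* Under chm_lock_vnocache the old reference is obsoleted via
 * chfs_remove_frags_of_node() and chfs_remove_and_obsolete(), and a new
 * full dnode built around nref is attached to the inode with
 * chfs_add_full_dnode_to_inode() and chfs_add_node_to_list()
 * (see the fragments above). */
kmem_free(data, totlen);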