/*	$NetBSD: chfs_nodeops.c,v 1.1.6.2 2012/04/17 00:08:54 yamt Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 David Tengeri <dtengeri (at) inf.u-szeged.hu>
 * Copyright (C) 2010 Tamas Toth <ttoth (at) inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka (at) NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "chfs.h"

/**
 * chfs_update_eb_dirty - updates the dirty and free space counters
 *			  of an eraseblock
 * @chmp: CHFS main descriptor structure
 * @cheb: eraseblock to update
 * @size: amount to add to the dirty space
 * Returns zero on success, 1 on failure.
 */
int
chfs_update_eb_dirty(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, uint32_t size)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	if (!size)
		return 0;

	if (size > cheb->free_size) {
		chfs_err("free_size (%d) is less than dirty space (%d) "
		    "on block (%d)\n", cheb->free_size, size, cheb->lnr);
		return 1;
	}
	mutex_enter(&chmp->chm_lock_sizes);
	//dbg("BEFORE: free_size: %d\n", cheb->free_size);
	chfs_change_size_free(chmp, cheb, -size);
	chfs_change_size_dirty(chmp, cheb, size);
	//dbg(" AFTER: free_size: %d\n", cheb->free_size);
	mutex_exit(&chmp->chm_lock_sizes);
	return 0;
}

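/*
 * Illustrative sketch, kept under #if 0 and not compiled: a caller
 * that pads a write out to the flash page size could account for the
 * pad bytes by dirtying them. 'example_dirty_padding' and 'pagesize'
 * are hypothetical names, not part of CHFS.
 */
#if 0
static int
example_dirty_padding(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, uint32_t written, uint32_t pagesize)
{
	/* Round the written length up to the next page boundary. */
	uint32_t padded = roundup(written, pagesize);

	/* The pad bytes can never hold data, so they become dirty. */
	return chfs_update_eb_dirty(chmp, cheb, padded - written);
}
#endif
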
/**
 * chfs_add_node_to_list - adds a data node ref to a vnode cache's dnode list
 * @chmp: CHFS main descriptor structure
 * @vc: vnode cache to add the node ref to
 * @new: node ref to insert
 * @list: head of the list
 * This function inserts a data node ref into the vnode cache's list.
 * The list is kept sorted by the nodes' lnr and offset.
 */
void
chfs_add_node_to_list(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc,
    struct chfs_node_ref *new, struct chfs_node_ref **list)
{
	struct chfs_node_ref *nextref = *list;
	struct chfs_node_ref *prevref = NULL;

	while (nextref && nextref != (struct chfs_node_ref *)vc &&
	    (nextref->nref_lnr <= new->nref_lnr)) {
		if (nextref->nref_lnr == new->nref_lnr) {
			while (nextref && nextref !=
			    (struct chfs_node_ref *)vc &&
			    (CHFS_GET_OFS(nextref->nref_offset) <
				CHFS_GET_OFS(new->nref_offset))) {
				prevref = nextref;
				nextref = nextref->nref_next;
			}
			break;
		}
		prevref = nextref;
		nextref = nextref->nref_next;
	}

	if (nextref && nextref != (struct chfs_node_ref *)vc &&
	    nextref->nref_lnr == new->nref_lnr &&
	    CHFS_GET_OFS(nextref->nref_offset) ==
	    CHFS_GET_OFS(new->nref_offset)) {
		/* duplicate entry: new replaces nextref in the list */
		new->nref_next = nextref->nref_next;
	} else {
		/* insert new in front of nextref */
		new->nref_next = nextref;
	}

	if (prevref) {
		prevref->nref_next = new;
	} else {
		*list = new;
	}
}

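/*
 * Illustrative sketch, kept under #if 0 and not compiled: after
 * chfs_add_node_to_list() the list is expected to stay sorted by
 * (lnr, offset). 'example_assert_sorted' is a hypothetical debug
 * helper showing that invariant; the list terminates either at NULL
 * or by pointing back at the vnode cache.
 */
#if 0
static void
example_assert_sorted(struct chfs_vnode_cache *vc,
    struct chfs_node_ref *list)
{
	struct chfs_node_ref *prev = NULL, *curr = list;

	while (curr && curr != (struct chfs_node_ref *)vc) {
		if (prev) {
			KASSERT(prev->nref_lnr <= curr->nref_lnr);
			if (prev->nref_lnr == curr->nref_lnr)
				KASSERT(CHFS_GET_OFS(prev->nref_offset) <=
				    CHFS_GET_OFS(curr->nref_offset));
		}
		prev = curr;
		curr = curr->nref_next;
	}
}
#endif
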
void
chfs_add_fd_to_inode(struct chfs_mount *chmp,
    struct chfs_inode *parent, struct chfs_dirent *new)
{
	struct chfs_dirent *fd, *tmpfd;

	if (new->version > parent->chvc->highest_version) {
		parent->chvc->highest_version = new->version;
	}

	//mutex_enter(&parent->inode_lock);
	TAILQ_FOREACH_SAFE(fd, &parent->dents, fds, tmpfd) {
		if (fd->nhash > new->nhash) {
			/* insert new before fd */
			TAILQ_INSERT_BEFORE(fd, new, fds);
			return;
		} else if (fd->nhash == new->nhash &&
		    !strcmp(fd->name, new->name)) {
			if (new->version > fd->version) {
				/* replace fd with the newer new */
				TAILQ_INSERT_BEFORE(fd, new, fds);
				TAILQ_REMOVE(&parent->dents, fd, fds);
				if (fd->nref) {
					chfs_mark_node_obsolete(chmp,
					    fd->nref);
				}
				chfs_free_dirent(fd);
			} else {
				/* new is stale: drop it */
				chfs_mark_node_obsolete(chmp, new->nref);
				chfs_free_dirent(new);
			}
			return;
		}
	}
	/* new sorts after every existing entry, so add it to the end */
	TAILQ_INSERT_TAIL(&parent->dents, new, fds);
	//mutex_exit(&parent->inode_lock);
}

void
chfs_add_vnode_ref_to_vc(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc, struct chfs_node_ref *new)
{
	if ((struct chfs_vnode_cache *)(vc->v) != vc) {
		/* an older vnode node exists: new supersedes it */
		chfs_mark_node_obsolete(chmp, vc->v);
		new->nref_next = vc->v->nref_next;
	} else {
		/* empty list: its head points back at the vnode cache */
		new->nref_next = vc->v;
	}
	vc->v = new;
}

struct chfs_node_ref *
chfs_nref_next(struct chfs_node_ref *nref)
{
//	dbg("check nref: %u - %u\n", nref->nref_lnr, nref->nref_offset);
	nref++;
//	dbg("next nref: %u - %u\n", nref->nref_lnr, nref->nref_offset);
	if (nref->nref_lnr == REF_LINK_TO_NEXT) {
		/* end of this nref array: follow the link, if any */
		if (!nref->nref_next)
			return NULL;

		nref = nref->nref_next;
	}
	/* end of chain */
	if (nref->nref_lnr == REF_EMPTY_NODE)
		return NULL;

	return nref;
}

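/*
 * Illustrative sketch, kept under #if 0 and not compiled: node refs
 * are allocated in arrays, so chfs_nref_next() steps to the adjacent
 * slot and follows REF_LINK_TO_NEXT entries between arrays. A whole
 * physical chain can therefore be walked from its first node; the
 * counting helper below is hypothetical.
 */
#if 0
static int
example_count_chain(struct chfs_node_ref *first)
{
	struct chfs_node_ref *nref;
	int n = 0;

	for (nref = first; nref != NULL; nref = chfs_nref_next(nref))
		n++;
	return n;
}
#endif
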
int
chfs_nref_len(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, struct chfs_node_ref *nref)
{
	struct chfs_node_ref *next;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	if (!cheb)
		cheb = &chmp->chm_blocks[nref->nref_lnr];

	next = chfs_nref_next(nref);

	if (!next) {
		//dbg("next null\n");
		return chmp->chm_ebh->eb_size - cheb->free_size -
		    CHFS_GET_OFS(nref->nref_offset);
	}
	//dbg("size: %d\n", CHFS_GET_OFS(next->nref_offset) - CHFS_GET_OFS(nref->nref_offset));
	return CHFS_GET_OFS(next->nref_offset) -
	    CHFS_GET_OFS(nref->nref_offset);
}

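/*
 * Worked example for chfs_nref_len(), with illustrative numbers: if
 * eb_size = 128 KiB and free_size = 28 KiB, the written area ends at
 * 128 - 28 = 100 KiB. A last node at offset 96 KiB therefore occupies
 * 100 - 96 = 4 KiB; any other node simply spans the gap to the next
 * node's offset.
 */
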
/**
 * chfs_mark_node_obsolete - marks a node obsolete
 * @chmp: chfs mount structure
 * @nref: node ref to mark as obsolete
 */
void
chfs_mark_node_obsolete(struct chfs_mount *chmp,
    struct chfs_node_ref *nref)
{
	int len;
	struct chfs_eraseblock *cheb;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	KASSERT(!CHFS_REF_OBSOLETE(nref));

	KASSERT(nref->nref_lnr <= chmp->chm_ebh->peb_nr);
	cheb = &chmp->chm_blocks[nref->nref_lnr];

#ifdef DIAGNOSTIC
	if (cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size != chmp->chm_ebh->eb_size) {
		dbg("eraseblock leak detected!\nused: %u\nfree: %u\n"
		    "dirty: %u\nunchecked: %u\nwasted: %u\ntotal: %u\nshould be: %zu\n",
		    cheb->used_size, cheb->free_size, cheb->dirty_size,
		    cheb->unchecked_size, cheb->wasted_size, cheb->used_size + cheb->free_size +
		    cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size,
		    chmp->chm_ebh->eb_size);
	}
#endif

	len = chfs_nref_len(chmp, cheb, nref);
	//dbg("len: %u\n", len);
	//dbg("1. used: %u\n", cheb->used_size);

	mutex_enter(&chmp->chm_lock_sizes);

	if (CHFS_REF_FLAGS(nref) == CHFS_UNCHECKED_NODE_MASK) {
		//dbg("UNCHECKED mark an unchecked node\n");
		chfs_change_size_unchecked(chmp, cheb, -len);
		//dbg("unchecked: %u\n", chmp->chm_unchecked_size);
	} else {
		chfs_change_size_used(chmp, cheb, -len);

		//dbg("2. used: %u\n", cheb->used_size);
		KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
	}
	chfs_change_size_dirty(chmp, cheb, len);

#ifdef DIAGNOSTIC
	if (cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size != chmp->chm_ebh->eb_size) {
		panic("eraseblock leak detected!\nused: %u\nfree: %u\n"
		    "dirty: %u\nunchecked: %u\nwasted: %u\ntotal: %u\nshould be: %zu\n",
		    cheb->used_size, cheb->free_size, cheb->dirty_size,
		    cheb->unchecked_size, cheb->wasted_size, cheb->used_size + cheb->free_size +
		    cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size,
		    chmp->chm_ebh->eb_size);
	}
#endif
	nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
	    CHFS_OBSOLETE_NODE_MASK;

	if (chmp->chm_flags & CHFS_MP_FLAG_SCANNING) {
		/* scan is in progress, do nothing now */
		mutex_exit(&chmp->chm_lock_sizes);
		return;
	}

	if (cheb == chmp->chm_nextblock) {
		dbg("Not moving nextblock to dirty/erase_pending list\n");
	} else if (!cheb->used_size && !cheb->unchecked_size) {
		if (cheb == chmp->chm_gcblock) {
			dbg("gcblock is completely dirtied\n");
			chmp->chm_gcblock = NULL;
		} else {
			/*
			 * Remove cheb from the queue that holds it; we
			 * don't know which queue that is, so search all
			 * of them.
			 */
			int removed = 0;
			struct chfs_eraseblock *eb, *tmpeb;
			//XXX ugly code
			TAILQ_FOREACH_SAFE(eb, &chmp->chm_free_queue, queue, tmpeb) {
				if (eb == cheb) {
					TAILQ_REMOVE(&chmp->chm_free_queue, cheb, queue);
					removed = 1;
					break;
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_dirty_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_very_dirty_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_very_dirty_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_clean_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_clean_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
		}
		if (chmp->chm_wbuf_len) {
			dbg("Adding block to erasable pending wbuf queue\n");
			TAILQ_INSERT_TAIL(&chmp->chm_erasable_pending_wbuf_queue,
			    cheb, queue);
		} else {
			TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
			    cheb, queue);
			chmp->chm_nr_erasable_blocks++;
		}
		chfs_remap_leb(chmp);
	} else if (cheb == chmp->chm_gcblock) {
		dbg("Not moving gcblock to dirty list\n");
	} else if (cheb->dirty_size > MAX_DIRTY_TO_CLEAN &&
	    cheb->dirty_size - len <= MAX_DIRTY_TO_CLEAN) {
		dbg("Freshly dirtied, remove it from clean queue and "
		    "add it to dirty\n");
		TAILQ_REMOVE(&chmp->chm_clean_queue, cheb, queue);
		TAILQ_INSERT_TAIL(&chmp->chm_dirty_queue, cheb, queue);
	} else if (VERY_DIRTY(chmp, cheb->dirty_size) &&
	    !VERY_DIRTY(chmp, cheb->dirty_size - len)) {
		dbg("Becomes now very dirty, remove it from dirty "
		    "queue and add it to very dirty\n");
		TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
		TAILQ_INSERT_TAIL(&chmp->chm_very_dirty_queue, cheb, queue);
	} else {
		dbg("Leave cheb where it is\n");
	}
	mutex_exit(&chmp->chm_lock_sizes);
	return;
}

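/*
 * Illustrative sketch, kept under #if 0 and not compiled: the typical
 * caller pattern, as in chfs_add_fd_to_inode() above: when a newer
 * node supersedes an older one, the superseded ref is turned into
 * dirty space. 'example_supersede' is a hypothetical name.
 */
#if 0
static void
example_supersede(struct chfs_mount *chmp,
    struct chfs_dirent *old, struct chfs_dirent *new)
{
	/* chm_lock_mountfields must be held by the caller. */
	if (new->version > old->version && old->nref != NULL)
		chfs_mark_node_obsolete(chmp, old->nref);
}
#endif
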
/**
 * chfs_close_eraseblock - closes an eraseblock
 * @chmp: chfs mount structure
 * @cheb: eraseblock information
 *
 * This function closes the physical chain of the nodes on the eraseblock,
 * converts its free size to dirty and adds the block to the clean, dirty
 * or very dirty list.
 */
int
chfs_close_eraseblock(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb)
{
	uint32_t offset;
	struct chfs_node_ref *nref;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	offset = chmp->chm_ebh->eb_size - cheb->free_size;

	// Close the chain
	nref = chfs_alloc_node_ref(cheb);
	if (!nref)
		return ENOMEM;

	nref->nref_next = NULL;
	nref->nref_offset = offset;

	// Mark space as dirty
	chfs_update_eb_dirty(chmp, cheb, cheb->free_size);

	if (cheb->dirty_size < MAX_DIRTY_TO_CLEAN) {
		TAILQ_INSERT_TAIL(&chmp->chm_clean_queue, cheb, queue);
	} else if (VERY_DIRTY(chmp, cheb->dirty_size)) {
		TAILQ_INSERT_TAIL(&chmp->chm_very_dirty_queue, cheb, queue);
	} else {
		TAILQ_INSERT_TAIL(&chmp->chm_dirty_queue, cheb, queue);
	}
	return 0;
}

int
chfs_reserve_space_normal(struct chfs_mount *chmp, uint32_t size, int prio)
{
	int ret;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
	while (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks < chmp->chm_resv_blocks_write) {
		dbg("free: %d, erasable: %d, resv: %d\n", chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks, chmp->chm_resv_blocks_write);
		uint32_t avail, dirty;
		if (prio == ALLOC_DELETION && chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks >= chmp->chm_resv_blocks_deletion)
			break;

		dirty = chmp->chm_dirty_size - chmp->chm_nr_erasable_blocks * chmp->chm_ebh->eb_size + chmp->chm_unchecked_size;
		if (dirty < chmp->chm_nospc_dirty) {
			dbg("dirty: %u < nospc_dirty: %u\n", dirty, chmp->chm_nospc_dirty);
			ret = ENOSPC;
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}

		avail = chmp->chm_free_size - (chmp->chm_resv_blocks_write * chmp->chm_ebh->eb_size);
		if (size > avail) {
			dbg("size: %u > avail: %u\n", size, avail);
			ret = ENOSPC;
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}

		mutex_exit(&chmp->chm_lock_sizes);
		ret = chfs_gcollect_pass(chmp);
		/* gcollect_pass exits chm_lock_mountfields */
		mutex_enter(&chmp->chm_lock_mountfields);
		mutex_enter(&chmp->chm_lock_sizes);

		if (chmp->chm_nr_erasable_blocks ||
		    !TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue) ||
		    ret == EAGAIN) {
			ret = chfs_remap_leb(chmp);
		}

		if (ret) {
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}
	}

	mutex_exit(&chmp->chm_lock_sizes);
	ret = chfs_reserve_space(chmp, size);
out:
	return ret;
}

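/*
 * Worked example for the reservation loop in chfs_reserve_space_normal()
 * above, with illustrative numbers: if eb_size = 128 KiB, chm_dirty_size
 * = 400 KiB, there are 2 erasable blocks and chm_unchecked_size = 16 KiB,
 * then the dirty space the GC could still reclaim is
 * 400 - 2 * 128 + 16 = 160 KiB; only if that falls below chm_nospc_dirty
 * does the reservation give up with ENOSPC.
 */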

int
chfs_reserve_space_gc(struct chfs_mount *chmp, uint32_t size)
{
	int ret;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_remap_leb(chmp);

	if (size > chmp->chm_free_size) {
		dbg("size: %u\n", size);
		mutex_exit(&chmp->chm_lock_sizes);
		return ENOSPC;
	}

	mutex_exit(&chmp->chm_lock_sizes);
	ret = chfs_reserve_space(chmp, size);
	return ret;
}

/**
 * chfs_reserve_space - finds a block whose free size is >= the requested size
 * @chmp: chfs mount point
 * @size: requested size
 * Returns zero on success, an error code on failure.
 */
int
chfs_reserve_space(struct chfs_mount *chmp, uint32_t size)
{
	//TODO define minimum reserved blocks, which is needed for writing
	//TODO check we have enough free blocks to write
	//TODO if no: need erase and GC

	int err;
	struct chfs_eraseblock *cheb;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	cheb = chmp->chm_nextblock;
	//if (cheb)
	    //dbg("cheb->free_size %u\n", cheb->free_size);
	if (cheb && size > cheb->free_size) {
		dbg("size: %u > free_size: %u\n", size, cheb->free_size);
		/*
		 * There isn't enough space on this eraseblock, so we mark
		 * it dirty and close the physical chain of the node refs.
		 */
		//Write out pending data if any
		if (chmp->chm_wbuf_len) {
			chfs_flush_pending_wbuf(chmp);
			//FIXME need goto restart here?
		}

		while (chmp->chm_wbuf_ofs < chmp->chm_ebh->eb_size) {
			dbg("wbuf ofs: %zu - eb_size: %zu\n",
			    chmp->chm_wbuf_ofs, chmp->chm_ebh->eb_size);
			chfs_flush_pending_wbuf(chmp);
		}

		if (!(chmp->chm_wbuf_ofs % chmp->chm_ebh->eb_size) && !chmp->chm_wbuf_len)
			chmp->chm_wbuf_ofs = 0xffffffff;

		err = chfs_close_eraseblock(chmp, cheb);
		if (err)
			return err;

		cheb = NULL;
	}
	if (!cheb) {
		//get a block for nextblock
		if (TAILQ_EMPTY(&chmp->chm_free_queue)) {
			// If this succeeds there will be a block on free_queue
			dbg("cheb remap (free: %d)\n", chmp->chm_nr_free_blocks);
			err = chfs_remap_leb(chmp);
			if (err)
				return err;
		}
		cheb = TAILQ_FIRST(&chmp->chm_free_queue);
		TAILQ_REMOVE(&chmp->chm_free_queue, cheb, queue);
		chmp->chm_nextblock = cheb;
		chmp->chm_nr_free_blocks--;
	}

	return 0;
}
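
/*
 * Illustrative sketch, kept under #if 0 and not compiled: a write path
 * would take chm_lock_mountfields, reserve space, then write its node
 * to chm_nextblock. 'example_write_node' and its arguments are
 * hypothetical; 0 stands for a normal (non-deletion) priority, since
 * only ALLOC_DELETION is special-cased in chfs_reserve_space_normal().
 */
#if 0
static int
example_write_node(struct chfs_mount *chmp, void *buf, uint32_t len)
{
	int err;

	mutex_enter(&chmp->chm_lock_mountfields);
	err = chfs_reserve_space_normal(chmp, len, 0);
	if (err == 0) {
		/* chm_nextblock now has at least 'len' bytes free. */
		/* ... write 'buf' and register its node ref ... */
	}
	mutex_exit(&chmp->chm_lock_mountfields);
	return err;
}
#endif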