/*	$NetBSD: chfs_nodeops.c,v 1.5 2021/12/07 21:37:37 andvar Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 David Tengeri <dtengeri (at) inf.u-szeged.hu>
 * Copyright (C) 2010 Tamas Toth <ttoth (at) inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka (at) NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "chfs.h"

/*
 * chfs_update_eb_dirty - converts an amount of free space to dirty
 *			      space on an eraseblock
 * Returns zero on success, 1 on failure.
 */
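/*
 * A minimal caller sketch (an illustration, not code from this file;
 * "wasted" is a hypothetical byte count being written off).  Per the
 * asserts below, the caller holds chm_lock_mountfields and must not
 * hold chm_lock_sizes:
 *
 *	mutex_enter(&chmp->chm_lock_mountfields);
 *	if (chfs_update_eb_dirty(chmp, cheb, wasted))
 *		chfs_err("cannot dirty %u bytes on block %u\n",
 *		    wasted, cheb->lnr);
 *	mutex_exit(&chmp->chm_lock_mountfields);
 */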
int
chfs_update_eb_dirty(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, uint32_t size)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	if (!size)
		return 0;

	if (size > cheb->free_size) {
		chfs_err("free_size (%u) is less than dirty space (%u) "
		    "on block (%u)\n", cheb->free_size, size, cheb->lnr);
		return 1;
	}
	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_free(chmp, cheb, -size);
	chfs_change_size_dirty(chmp, cheb, size);
	mutex_exit(&chmp->chm_lock_sizes);
	return 0;
}

/*
 * chfs_add_node_to_list - adds a data node ref to a vnode cache's node list
 * This function inserts a data node ref into one of the vnode cache's lists.
 * The list is kept sorted by the node's erase block number (lnr) and offset.
 */
void
chfs_add_node_to_list(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc,
    struct chfs_node_ref *new, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	struct chfs_node_ref *nextref = *list;
	struct chfs_node_ref *prevref = NULL;

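	/*
	 * Walk the list to find the insertion point: advance first by
	 * erase block number (lnr), then, within a matching block, by
	 * node offset.  The list is terminated by the vnode cache
	 * itself acting as a sentinel.
	 */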
	while (nextref && nextref != (struct chfs_node_ref *)vc &&
	    (nextref->nref_lnr <= new->nref_lnr)) {
		if (nextref->nref_lnr == new->nref_lnr) {
			while (nextref && nextref !=
			    (struct chfs_node_ref *)vc &&
			    (CHFS_GET_OFS(nextref->nref_offset) <
				CHFS_GET_OFS(new->nref_offset))) {
				prevref = nextref;
				nextref = nextref->nref_next;
			}
			break;
		}
		prevref = nextref;
		nextref = nextref->nref_next;
	}

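	/*
	 * An exact (lnr, offset) match means the new ref describes the
	 * same on-flash node; splice the new ref into its place and
	 * obsolete the old one.  Otherwise link the new ref in front of
	 * nextref.
	 */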
	if (nextref && nextref != (struct chfs_node_ref *)vc &&
	    nextref->nref_lnr == new->nref_lnr &&
	    CHFS_GET_OFS(nextref->nref_offset) ==
	    CHFS_GET_OFS(new->nref_offset)) {
		new->nref_next = nextref->nref_next;
		chfs_mark_node_obsolete(chmp, nextref);
	} else {
		new->nref_next = nextref;
	}

	KASSERT(new->nref_next != NULL);

	if (prevref) {
		prevref->nref_next = new;
	} else {
		*list = new;
	}
}

/*
 * chfs_remove_node_from_list - removes a node from a list
 * Usually used for removing data nodes.
 */
void
chfs_remove_node_from_list(struct chfs_mount *chmp,
	struct chfs_vnode_cache *vc,
	struct chfs_node_ref *old_nref, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	struct chfs_node_ref *tmpnref;

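	/*
	 * Lists hanging off a vnode cache are terminated by a pointer
	 * back to the cache itself rather than by NULL, so a list whose
	 * head is the cache holds no nodes at all.
	 */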
	if (*list == (struct chfs_node_ref *)vc) {
		/* list is empty */
		return;
	}

	KASSERT(old_nref->nref_next != NULL);

	if (*list == old_nref) {
		*list = old_nref->nref_next;
	} else {
		tmpnref = *list;
		while (tmpnref->nref_next &&
			tmpnref->nref_next != (struct chfs_node_ref *)vc) {
			if (tmpnref->nref_next == old_nref) {
				tmpnref->nref_next = old_nref->nref_next;
				break;
			}
			tmpnref = tmpnref->nref_next;
		}
	}
}

/*
 * chfs_remove_and_obsolete - removes a node from a list and obsoletes the nref
 * Use carefully on data nodes: removing a frag also obsoletes the
 * node ref, so the nref must not be obsoleted a second time here.
 */
void
chfs_remove_and_obsolete(struct chfs_mount *chmp,
	struct chfs_vnode_cache *vc,
	struct chfs_node_ref *old_nref, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	chfs_remove_node_from_list(chmp, vc, old_nref, list);

	dbg("[MARK] vno: %llu lnr: %u ofs: %u\n", vc->vno, old_nref->nref_lnr,
		old_nref->nref_offset);
	chfs_mark_node_obsolete(chmp, old_nref);
}

/* chfs_add_fd_to_inode - adds a directory entry to an inode */
void
chfs_add_fd_to_inode(struct chfs_mount *chmp,
    struct chfs_inode *parent, struct chfs_dirent *new)
{
	struct chfs_dirent *fd, *tmpfd;

	/* update highest version */
	if (new->version > parent->chvc->highest_version) {
		parent->chvc->highest_version = new->version;
	}

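	/*
	 * The dirent list is kept sorted by name hash.  A matching hash
	 * and name means the same directory entry was written again:
	 * the higher version wins, the loser is obsoleted and freed.
	 */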
	TAILQ_FOREACH_SAFE(fd, &parent->dents, fds, tmpfd) {
		if (fd->nhash > new->nhash) {
			/* insert new before fd */
			TAILQ_INSERT_BEFORE(fd, new, fds);
			return;
		} else if (fd->nhash == new->nhash &&
		    !strcmp(fd->name, new->name)) {
			if (new->version > fd->version) {
				/* replace fd with new */
				TAILQ_INSERT_BEFORE(fd, new, fds);
				TAILQ_REMOVE(&parent->dents, fd, fds);
				if (fd->nref) {
					mutex_enter(&chmp->chm_lock_vnocache);
					chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
						&parent->chvc->dirents);
					mutex_exit(&chmp->chm_lock_vnocache);
				}
				chfs_free_dirent(fd);
			} else {
				/* new is older (this should not normally happen) */
				chfs_mark_node_obsolete(chmp, new->nref);
				chfs_free_dirent(new);
			}
			return;
		}
	}
	/* no match found; add it at the head of the list */
	/* FIXME insert tail or insert head? */
	TAILQ_INSERT_HEAD(&parent->dents, new, fds);
}


/* chfs_add_vnode_ref_to_vc - adds a vnode info to the vnode cache */
void
chfs_add_vnode_ref_to_vc(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc, struct chfs_node_ref *new)
{
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));
	struct chfs_node_ref *nref;

	/* store only the last one, drop the others */
	while (vc->v != (struct chfs_node_ref *)vc) {
		nref = vc->v;
		chfs_remove_and_obsolete(chmp, vc, nref, &vc->v);
	}

	new->nref_next = (struct chfs_node_ref *)vc;
	vc->v = new;
}

/* chfs_nref_next - step to the next in-memory nref */
struct chfs_node_ref *
chfs_nref_next(struct chfs_node_ref *nref)
{
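	/*
	 * Node refs are allocated in contiguous arrays, so the next ref
	 * normally sits right behind this one in memory.  A slot whose
	 * lnr is REF_LINK_TO_NEXT holds no node; its nref_next points
	 * to the next array, or is NULL at the end of the chain.
	 */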
	nref++;
	if (nref->nref_lnr == REF_LINK_TO_NEXT) {
		/* end of chain */
		if (!nref->nref_next)
			return NULL;

		/* link to the next block */
		nref = nref->nref_next;
	}
	/* end of chain */
	if (nref->nref_lnr == REF_EMPTY_NODE)
		return NULL;

	return nref;
}

/* chfs_nref_len - calculates the length of an nref */
int
chfs_nref_len(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, struct chfs_node_ref *nref)
{
	struct chfs_node_ref *next;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	if (!cheb)
		cheb = &chmp->chm_blocks[nref->nref_lnr];

	next = chfs_nref_next(nref);

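	/*
	 * A node's length is the distance to the next node on the same
	 * eraseblock.  The last node has no successor, so it extends to
	 * the end of the used area; eb_size - free_size is the offset
	 * of the first free byte.
	 */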
	if (!next) {
		return chmp->chm_ebh->eb_size - cheb->free_size -
		    CHFS_GET_OFS(nref->nref_offset);
	}
	return CHFS_GET_OFS(next->nref_offset) -
	    CHFS_GET_OFS(nref->nref_offset);
}

/* chfs_mark_node_obsolete - marks a node as obsolete */
void
chfs_mark_node_obsolete(struct chfs_mount *chmp,
    struct chfs_node_ref *nref)
{
	int len;
	struct chfs_eraseblock *cheb;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	KASSERT(!CHFS_REF_OBSOLETE(nref));

	KASSERT(nref->nref_lnr <= chmp->chm_ebh->peb_nr);
	cheb = &chmp->chm_blocks[nref->nref_lnr];

#ifdef DIAGNOSTIC
	if (cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size != chmp->chm_ebh->eb_size) {
		dbg("eraseblock leak detected!\nused: %u\nfree: %u\n"
		    "dirty: %u\nunchecked: %u\nwasted: %u\ntotal: %u\nshould be: %zu\n",
		    cheb->used_size, cheb->free_size, cheb->dirty_size,
		    cheb->unchecked_size, cheb->wasted_size, cheb->used_size + cheb->free_size +
		    cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size,
		    chmp->chm_ebh->eb_size);
	}
#endif

	len = chfs_nref_len(chmp, cheb, nref);

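	/*
	 * Move the node's bytes from the used counter (or, for a node
	 * that has not been checked yet, the unchecked counter) to the
	 * dirty counter; the per-block size invariant checked above
	 * must hold afterwards as well.
	 */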
	mutex_enter(&chmp->chm_lock_sizes);

	if (CHFS_REF_FLAGS(nref) == CHFS_UNCHECKED_NODE_MASK) {
		chfs_change_size_unchecked(chmp, cheb, -len);
	} else {
		chfs_change_size_used(chmp, cheb, -len);

		KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
	}
	chfs_change_size_dirty(chmp, cheb, len);

#ifdef DIAGNOSTIC
	if (cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size != chmp->chm_ebh->eb_size) {
		panic("eraseblock leak detected!\nused: %u\nfree: %u\n"
		    "dirty: %u\nunchecked: %u\nwasted: %u\ntotal: %u\nshould be: %zu\n",
		    cheb->used_size, cheb->free_size, cheb->dirty_size,
		    cheb->unchecked_size, cheb->wasted_size, cheb->used_size + cheb->free_size +
		    cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size,
		    chmp->chm_ebh->eb_size);
	}
#endif
	nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
	    CHFS_OBSOLETE_NODE_MASK;

	if (chmp->chm_flags & CHFS_MP_FLAG_SCANNING) {
		/* Scan is in progress, do nothing now */
		mutex_exit(&chmp->chm_lock_sizes);
		return;
	}

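	/*
	 * Dirtying the node may change which queue the eraseblock
	 * belongs on: a fully dirtied block becomes erasable, and a
	 * block crossing the MAX_DIRTY_TO_CLEAN or VERY_DIRTY threshold
	 * moves from the clean to the dirty, or from the dirty to the
	 * very dirty queue.  The nextblock and the current GC block are
	 * left where they are.
	 */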
	if (cheb == chmp->chm_nextblock) {
		dbg("Not moving nextblock to dirty/erase_pending list\n");
	} else if (!cheb->used_size && !cheb->unchecked_size) {
		if (cheb == chmp->chm_gcblock) {
			dbg("gcblock is completely dirtied\n");
			chmp->chm_gcblock = NULL;
		} else {
			/* Remove the block from whichever tailq currently
			 * holds it; the owning queue isn't recorded, so
			 * search all of them. */
			int removed = 0;
			struct chfs_eraseblock *eb, *tmpeb;
			//XXX ugly code
			TAILQ_FOREACH_SAFE(eb, &chmp->chm_free_queue, queue, tmpeb) {
				if (eb == cheb) {
					TAILQ_REMOVE(&chmp->chm_free_queue, cheb, queue);
					removed = 1;
					break;
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_dirty_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_very_dirty_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_very_dirty_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_clean_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_clean_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
		}
		if (chmp->chm_wbuf_len) {
			dbg("Adding block to erasable pending wbuf queue\n");
			TAILQ_INSERT_TAIL(&chmp->chm_erasable_pending_wbuf_queue,
			    cheb, queue);
		} else {
			TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
			    cheb, queue);
			chmp->chm_nr_erasable_blocks++;
		}
		chfs_remap_leb(chmp);
	} else if (cheb == chmp->chm_gcblock) {
		dbg("Not moving gcblock to dirty list\n");
	} else if (cheb->dirty_size > MAX_DIRTY_TO_CLEAN &&
	    cheb->dirty_size - len <= MAX_DIRTY_TO_CLEAN) {
		dbg("Freshly dirtied, remove it from clean queue and "
		    "add it to dirty\n");
		TAILQ_REMOVE(&chmp->chm_clean_queue, cheb, queue);
		TAILQ_INSERT_TAIL(&chmp->chm_dirty_queue, cheb, queue);
	} else if (VERY_DIRTY(chmp, cheb->dirty_size) &&
	    !VERY_DIRTY(chmp, cheb->dirty_size - len)) {
		dbg("Becomes now very dirty, remove it from dirty "
		    "queue and add it to very dirty\n");
		TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
		TAILQ_INSERT_TAIL(&chmp->chm_very_dirty_queue, cheb, queue);
	} else {
		dbg("Leave cheb where it is\n");
	}
	mutex_exit(&chmp->chm_lock_sizes);
	return;
}

/*
 * chfs_close_eraseblock - closes an eraseblock
 *
 * This function closes the physical chain of nodes on the eraseblock,
 * converts its free size to dirty and adds the block to the clean,
 * dirty or very dirty queue.
 */
int
chfs_close_eraseblock(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb)
{
	uint32_t offset;
	struct chfs_node_ref *nref;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

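	/* The offset of the first free byte is where the chain is closed. */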
	offset = chmp->chm_ebh->eb_size - cheb->free_size;

	// Close the chain
	nref = chfs_alloc_node_ref(cheb);
	if (!nref)
		return ENOMEM;

	nref->nref_next = NULL;
	nref->nref_offset = offset;

	// Mark space as dirty
	chfs_update_eb_dirty(chmp, cheb, cheb->free_size);

	if (cheb->dirty_size < MAX_DIRTY_TO_CLEAN) {
		TAILQ_INSERT_TAIL(&chmp->chm_clean_queue, cheb, queue);
	} else if (VERY_DIRTY(chmp, cheb->dirty_size)) {
		TAILQ_INSERT_TAIL(&chmp->chm_very_dirty_queue, cheb, queue);
	} else {
		TAILQ_INSERT_TAIL(&chmp->chm_dirty_queue, cheb, queue);
	}
	return 0;
}

/*
 * chfs_reserve_space_normal -
 * checks the available space and calls chfs_reserve_space
 * Used during writing.
 */
int
chfs_reserve_space_normal(struct chfs_mount *chmp, uint32_t size, int prio)
{
	int ret;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
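	/*
	 * While the pool of free and erasable blocks is below the write
	 * reserve, try to make room: fail with ENOSPC if there is not
	 * enough reclaimable dirty space or not enough room for the
	 * request itself, otherwise run a garbage collector pass, remap
	 * erasable blocks and re-check.  Deletions (ALLOC_DELETION) may
	 * dig deeper into the reserve than normal writes.
	 */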
	while (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks < chmp->chm_resv_blocks_write) {
		dbg("free: %d, erasable: %d, resv: %d\n", chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks, chmp->chm_resv_blocks_write);
		uint32_t avail, dirty;
		if (prio == ALLOC_DELETION && chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks >= chmp->chm_resv_blocks_deletion)
			break;

		dirty = chmp->chm_dirty_size - chmp->chm_nr_erasable_blocks * chmp->chm_ebh->eb_size + chmp->chm_unchecked_size;
		if (dirty < chmp->chm_nospc_dirty) {
			dbg("dirty: %u < nospc_dirty: %u\n", dirty, chmp->chm_nospc_dirty);
			ret = ENOSPC;
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}

		avail = chmp->chm_free_size - (chmp->chm_resv_blocks_write * chmp->chm_ebh->eb_size);
		if (size > avail) {
			dbg("size: %u > avail: %u\n", size, avail);
			ret = ENOSPC;
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}

		mutex_exit(&chmp->chm_lock_sizes);
		ret = chfs_gcollect_pass(chmp);
		mutex_enter(&chmp->chm_lock_sizes);

		if (chmp->chm_nr_erasable_blocks ||
		    !TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue) ||
		    ret == EAGAIN) {
			ret = chfs_remap_leb(chmp);
		}

		if (ret) {
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}
	}

	mutex_exit(&chmp->chm_lock_sizes);
	ret = chfs_reserve_space(chmp, size);
out:
	return ret;
}


/* chfs_reserve_space_gc - tries to reserve space for GC */
int
chfs_reserve_space_gc(struct chfs_mount *chmp, uint32_t size)
{
	int ret;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_remap_leb(chmp);

	if (size > chmp->chm_free_size) {
		dbg("size: %u\n", size);
		mutex_exit(&chmp->chm_lock_sizes);
		return ENOSPC;
	}

	mutex_exit(&chmp->chm_lock_sizes);
	ret = chfs_reserve_space(chmp, size);
	return ret;
}

/*
 * chfs_reserve_space - finds a block whose free size is >= the requested size
 * Returns zero on success, an error code on failure.
 */
int
chfs_reserve_space(struct chfs_mount *chmp, uint32_t size)
{
	//TODO define minimum reserved blocks, which is needed for writing
	//TODO check we have enough free blocks to write
	//TODO if no: need erase and GC

	int err;
	struct chfs_eraseblock *cheb;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	cheb = chmp->chm_nextblock;
	if (cheb && size > cheb->free_size) {
		dbg("size: %u > free_size: %u\n", size, cheb->free_size);
		/*
		 * There isn't enough space on this eraseblock; mark the
		 * remainder as dirty and close the physical chain of the
		 * node refs.
		 */
		//Write out pending data if any
		if (chmp->chm_wbuf_len) {
			chfs_flush_pending_wbuf(chmp);
			//FIXME need goto restart here?
		}

		while (chmp->chm_wbuf_ofs < chmp->chm_ebh->eb_size) {
			dbg("wbuf ofs: %zu - eb_size: %zu\n",
			    chmp->chm_wbuf_ofs, chmp->chm_ebh->eb_size);
			chfs_flush_pending_wbuf(chmp);
		}

		if (!(chmp->chm_wbuf_ofs % chmp->chm_ebh->eb_size) && !chmp->chm_wbuf_len)
			chmp->chm_wbuf_ofs = 0xffffffff;

		err = chfs_close_eraseblock(chmp, cheb);
		if (err)
			return err;

		cheb = NULL;
	}
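	/*
	 * No usable nextblock: take a fresh eraseblock from the free
	 * queue, remapping LEBs first if the queue is empty.
	 */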
	if (!cheb) {
		//get a block for nextblock
		if (TAILQ_EMPTY(&chmp->chm_free_queue)) {
			// If this succeeds there will be a block on free_queue
			dbg("cheb remap (free: %d)\n", chmp->chm_nr_free_blocks);
			err = chfs_remap_leb(chmp);
			if (err)
				return err;
		}
		cheb = TAILQ_FIRST(&chmp->chm_free_queue);
		TAILQ_REMOVE(&chmp->chm_free_queue, cheb, queue);
		chmp->chm_nextblock = cheb;
		chmp->chm_nr_free_blocks--;
	}

	return 0;
}