/*	$NetBSD: chfs_nodeops.c,v 1.4 2013/12/09 09:35:17 wiz Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 David Tengeri <dtengeri (at) inf.u-szeged.hu>
 * Copyright (C) 2010 Tamas Toth <ttoth (at) inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka (at) NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "chfs.h"

/*
 * chfs_update_eb_dirty - updates dirty and free space, first and
 *			  last node references
 * Returns zero on success, 1 on failure.
 */
int
chfs_update_eb_dirty(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, uint32_t size)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	if (!size)
		return 0;

	if (size > cheb->free_size) {
		chfs_err("free_size (%d) is less than dirty space (%d) "
		    "on block (%d)\n", cheb->free_size, size, cheb->lnr);
		return 1;
	}
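	/* Move size bytes of the block's space accounting from free to dirty. */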
	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_free(chmp, cheb, -size);
	chfs_change_size_dirty(chmp, cheb, size);
	mutex_exit(&chmp->chm_lock_sizes);
	return 0;
}

/*
 * chfs_add_node_to_list - adds a data node ref to the vnode cache's dnode list
 * This function inserts a data node ref into the list of the vnode cache.
 * The list is sorted by the data node's lnr and offset.
 */
void
chfs_add_node_to_list(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc,
    struct chfs_node_ref *new, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	struct chfs_node_ref *nextref = *list;
	struct chfs_node_ref *prevref = NULL;

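	/*
	 * Find the insertion point: the list is ordered by eraseblock
	 * number (lnr) and, within an eraseblock, by offset.
	 */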
	while (nextref && nextref != (struct chfs_node_ref *)vc &&
	    (nextref->nref_lnr <= new->nref_lnr)) {
		if (nextref->nref_lnr == new->nref_lnr) {
			while (nextref && nextref !=
			    (struct chfs_node_ref *)vc &&
			    (CHFS_GET_OFS(nextref->nref_offset) <
				CHFS_GET_OFS(new->nref_offset))) {
				prevref = nextref;
				nextref = nextref->nref_next;
			}
			break;
		}
		prevref = nextref;
		nextref = nextref->nref_next;
	}

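	/*
	 * A node already on the list with the same eraseblock number and
	 * offset is replaced by the new ref and marked obsolete.
	 */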
	if (nextref && nextref != (struct chfs_node_ref *)vc &&
	    nextref->nref_lnr == new->nref_lnr &&
	    CHFS_GET_OFS(nextref->nref_offset) ==
	    CHFS_GET_OFS(new->nref_offset)) {
		new->nref_next = nextref->nref_next;
		chfs_mark_node_obsolete(chmp, nextref);
	} else {
		new->nref_next = nextref;
	}

	KASSERT(new->nref_next != NULL);

	if (prevref) {
		prevref->nref_next = new;
	} else {
		*list = new;
	}
}

/*
 * chfs_remove_node_from_list - removes a node from a list
 * Usually used for removing data nodes.
 */
void
chfs_remove_node_from_list(struct chfs_mount *chmp,
	struct chfs_vnode_cache *vc,
	struct chfs_node_ref *old_nref, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	struct chfs_node_ref *tmpnref;

	if (*list == (struct chfs_node_ref *)vc) {
		/* list is empty */
		return;
	}

	KASSERT(old_nref->nref_next != NULL);

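	/*
	 * Unlink old_nref: either it is the list head, or we walk the list
	 * to find its predecessor.  The list is terminated by a pointer
	 * back to the vnode cache itself.
	 */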
	if (*list == old_nref) {
		*list = old_nref->nref_next;
	} else {
		tmpnref = *list;
		while (tmpnref->nref_next &&
			tmpnref->nref_next != (struct chfs_node_ref *)vc) {
			if (tmpnref->nref_next == old_nref) {
				tmpnref->nref_next = old_nref->nref_next;
				break;
			}
			tmpnref = tmpnref->nref_next;
		}
	}
}

/*
 * chfs_remove_and_obsolete - removes a node from a list and obsoletes the nref
 * Use this function carefully on data nodes, because removing a frag
 * also obsoletes its node ref.
 */
void
chfs_remove_and_obsolete(struct chfs_mount *chmp,
	struct chfs_vnode_cache *vc,
	struct chfs_node_ref *old_nref, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	chfs_remove_node_from_list(chmp, vc, old_nref, list);

	dbg("[MARK] vno: %llu lnr: %u ofs: %u\n", vc->vno, old_nref->nref_lnr,
		old_nref->nref_offset);
	chfs_mark_node_obsolete(chmp, old_nref);
}

/* chfs_add_fd_to_inode - adds a directory entry to an inode */
void
chfs_add_fd_to_inode(struct chfs_mount *chmp,
    struct chfs_inode *parent, struct chfs_dirent *new)
{
	struct chfs_dirent *fd, *tmpfd;

	/* update highest version */
	if (new->version > parent->chvc->highest_version) {
		parent->chvc->highest_version = new->version;
	}

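	/*
	 * The dirent list is kept ordered by name hash; an entry with the
	 * same hash and name is replaced only if the new dirent carries a
	 * higher version.
	 */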
	TAILQ_FOREACH_SAFE(fd, &parent->dents, fds, tmpfd) {
		if (fd->nhash > new->nhash) {
			/* insert new before fd */
			TAILQ_INSERT_BEFORE(fd, new, fds);
			return;
		} else if (fd->nhash == new->nhash &&
		    !strcmp(fd->name, new->name)) {
			if (new->version > fd->version) {
				/* replace fd with new */
				TAILQ_INSERT_BEFORE(fd, new, fds);
				TAILQ_REMOVE(&parent->dents, fd, fds);
				if (fd->nref) {
					mutex_enter(&chmp->chm_lock_vnocache);
					chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
						&parent->chvc->dirents);
					mutex_exit(&chmp->chm_lock_vnocache);
				}
				chfs_free_dirent(fd);
			} else {
				/* new is older (this should not normally happen) */
				chfs_mark_node_obsolete(chmp, new->nref);
				chfs_free_dirent(new);
			}
			return;
		}
	}
	/* if we couldn't fit it elsewhere, add it to the end */
	/* FIXME insert tail or insert head? */
	TAILQ_INSERT_HEAD(&parent->dents, new, fds);
}


/* chfs_add_vnode_ref_to_vc - adds a vnode info to the vnode cache */
void
chfs_add_vnode_ref_to_vc(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc, struct chfs_node_ref *new)
{
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));
	struct chfs_node_ref *nref;

	/* store only the last one, drop the others */
	while (vc->v != (struct chfs_node_ref *)vc) {
		nref = vc->v;
		chfs_remove_and_obsolete(chmp, vc, nref, &vc->v);
	}

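	/* The chain is terminated by a pointer back to the vnode cache. */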
	new->nref_next = (struct chfs_node_ref *)vc;
	vc->v = new;
}

/* chfs_nref_next - step to the next in-memory nref */
struct chfs_node_ref *
chfs_nref_next(struct chfs_node_ref *nref)
{
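	/*
	 * Node refs are laid out consecutively in memory: normally the next
	 * nref is simply the next element.  An entry with lnr set to
	 * REF_LINK_TO_NEXT links to the next group of refs, and
	 * REF_EMPTY_NODE marks the end of the chain.
	 */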
	nref++;
	if (nref->nref_lnr == REF_LINK_TO_NEXT) {
		/* end of chain */
		if (!nref->nref_next)
			return NULL;

		/* link to the next block */
		nref = nref->nref_next;
	}
	/* end of chain */
	if (nref->nref_lnr == REF_EMPTY_NODE)
		return NULL;

	return nref;
}

/* chfs_nref_len - calculates the length of an nref */
int
chfs_nref_len(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, struct chfs_node_ref *nref)
{
	struct chfs_node_ref *next;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	if (!cheb)
		cheb = &chmp->chm_blocks[nref->nref_lnr];

	next = chfs_nref_next(nref);

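	/*
	 * The last node on an eraseblock extends to the start of the
	 * block's free space; otherwise the length is the distance to the
	 * next node's offset.
	 */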
	if (!next) {
		return chmp->chm_ebh->eb_size - cheb->free_size -
		    CHFS_GET_OFS(nref->nref_offset);
	}
	return CHFS_GET_OFS(next->nref_offset) -
	    CHFS_GET_OFS(nref->nref_offset);
}

/* chfs_mark_node_obsolete - marks a node as obsolete */
void
chfs_mark_node_obsolete(struct chfs_mount *chmp,
    struct chfs_node_ref *nref)
{
	int len;
	struct chfs_eraseblock *cheb;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	KASSERT(!CHFS_REF_OBSOLETE(nref));

	KASSERT(nref->nref_lnr <= chmp->chm_ebh->peb_nr);
	cheb = &chmp->chm_blocks[nref->nref_lnr];

#ifdef DIAGNOSTIC
	if (cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size != chmp->chm_ebh->eb_size) {
		dbg("eraseblock leak detected!\nused: %u\nfree: %u\n"
		    "dirty: %u\nunchecked: %u\nwasted: %u\ntotal: %u\nshould be: %zu\n",
		    cheb->used_size, cheb->free_size, cheb->dirty_size,
		    cheb->unchecked_size, cheb->wasted_size, cheb->used_size + cheb->free_size +
		    cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size,
		    chmp->chm_ebh->eb_size);
	}
#endif

	len = chfs_nref_len(chmp, cheb, nref);

	mutex_enter(&chmp->chm_lock_sizes);

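	/*
	 * Account the node's length as dirty space, taking it from either
	 * the unchecked or the used counter depending on the ref's flags.
	 */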
	if (CHFS_REF_FLAGS(nref) == CHFS_UNCHECKED_NODE_MASK) {
		chfs_change_size_unchecked(chmp, cheb, -len);
	} else {
		chfs_change_size_used(chmp, cheb, -len);

		KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
	}
	chfs_change_size_dirty(chmp, cheb, len);

#ifdef DIAGNOSTIC
	if (cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size != chmp->chm_ebh->eb_size) {
		panic("eraseblock leak detected!\nused: %u\nfree: %u\n"
		    "dirty: %u\nunchecked: %u\nwasted: %u\ntotal: %u\nshould be: %zu\n",
		    cheb->used_size, cheb->free_size, cheb->dirty_size,
		    cheb->unchecked_size, cheb->wasted_size, cheb->used_size + cheb->free_size +
		    cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size,
		    chmp->chm_ebh->eb_size);
	}
#endif
	nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
	    CHFS_OBSOLETE_NODE_MASK;

	if (chmp->chm_flags & CHFS_MP_FLAG_SCANNING) {
		/* Scan is in progress, do nothing now */
		mutex_exit(&chmp->chm_lock_sizes);
		return;
	}

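	/*
	 * Re-classify the eraseblock now that it has more dirty space:
	 * completely dirtied blocks become erase candidates, others may
	 * move between the clean, dirty and very dirty queues.
	 */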
	if (cheb == chmp->chm_nextblock) {
		dbg("Not moving nextblock to dirty/erase_pending list\n");
	} else if (!cheb->used_size && !cheb->unchecked_size) {
		if (cheb == chmp->chm_gcblock) {
			dbg("gcblock is completely dirtied\n");
			chmp->chm_gcblock = NULL;
		} else {
			/* remove from a tailq, but we don't know which tailq
			 * contains this cheb, so check all of them */
			//TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
			int removed = 0;
			struct chfs_eraseblock *eb, *tmpeb;
			//XXX ugly code
			TAILQ_FOREACH_SAFE(eb, &chmp->chm_free_queue, queue, tmpeb) {
				if (eb == cheb) {
					TAILQ_REMOVE(&chmp->chm_free_queue, cheb, queue);
					removed = 1;
					break;
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_dirty_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_very_dirty_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_very_dirty_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_clean_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_clean_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
		}
		if (chmp->chm_wbuf_len) {
			dbg("Adding block to erasable pending wbuf queue\n");
			TAILQ_INSERT_TAIL(&chmp->chm_erasable_pending_wbuf_queue,
			    cheb, queue);
		} else {
			TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
			    cheb, queue);
			chmp->chm_nr_erasable_blocks++;
		}
		chfs_remap_leb(chmp);
	} else if (cheb == chmp->chm_gcblock) {
		dbg("Not moving gcblock to dirty list\n");
	} else if (cheb->dirty_size > MAX_DIRTY_TO_CLEAN &&
	    cheb->dirty_size - len <= MAX_DIRTY_TO_CLEAN) {
		dbg("Freshly dirtied, remove it from clean queue and "
		    "add it to dirty\n");
		TAILQ_REMOVE(&chmp->chm_clean_queue, cheb, queue);
		TAILQ_INSERT_TAIL(&chmp->chm_dirty_queue, cheb, queue);
	} else if (VERY_DIRTY(chmp, cheb->dirty_size) &&
	    !VERY_DIRTY(chmp, cheb->dirty_size - len)) {
		dbg("Became very dirty, remove it from dirty "
		    "queue and add it to very dirty\n");
		TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
		TAILQ_INSERT_TAIL(&chmp->chm_very_dirty_queue, cheb, queue);
	} else {
		dbg("Leave cheb where it is\n");
	}
	mutex_exit(&chmp->chm_lock_sizes);
	return;
}

/*
 * chfs_close_eraseblock - closes an eraseblock
 *
 * This function closes the physical chain of nodes on the eraseblock,
 * converts its free space to dirty and adds the block to the clean,
 * dirty or very dirty queue.
 */
int
chfs_close_eraseblock(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb)
{
	uint32_t offset;
	struct chfs_node_ref *nref;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	offset = chmp->chm_ebh->eb_size - cheb->free_size;

	// Close the chain
	nref = chfs_alloc_node_ref(cheb);
	if (!nref)
		return ENOMEM;

	nref->nref_next = NULL;
	nref->nref_offset = offset;

	// Mark space as dirty
	chfs_update_eb_dirty(chmp, cheb, cheb->free_size);

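	// Queue the closed block according to how dirty it has become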
	if (cheb->dirty_size < MAX_DIRTY_TO_CLEAN) {
		TAILQ_INSERT_TAIL(&chmp->chm_clean_queue, cheb, queue);
	} else if (VERY_DIRTY(chmp, cheb->dirty_size)) {
		TAILQ_INSERT_TAIL(&chmp->chm_very_dirty_queue, cheb, queue);
	} else {
		TAILQ_INSERT_TAIL(&chmp->chm_dirty_queue, cheb, queue);
	}
	return 0;
}

/*
 * chfs_reserve_space_normal -
 * checks the available space and calls chfs_reserve_space; used during writing.
 */
int
chfs_reserve_space_normal(struct chfs_mount *chmp, uint32_t size, int prio)
{
	int ret;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
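	/*
	 * While there are not enough free or erasable blocks to cover the
	 * write reserve, try to produce some by garbage collecting.
	 * Deletions may dip further, down to the deletion reserve.
	 */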
	while (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks < chmp->chm_resv_blocks_write) {
		dbg("free: %d, erasable: %d, resv: %d\n", chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks, chmp->chm_resv_blocks_write);
		uint32_t avail, dirty;
		if (prio == ALLOC_DELETION && chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks >= chmp->chm_resv_blocks_deletion)
			break;

		dirty = chmp->chm_dirty_size - chmp->chm_nr_erasable_blocks * chmp->chm_ebh->eb_size + chmp->chm_unchecked_size;
		if (dirty < chmp->chm_nospc_dirty) {
			dbg("dirty: %u < nospc_dirty: %u\n", dirty, chmp->chm_nospc_dirty);
			ret = ENOSPC;
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}

		avail = chmp->chm_free_size - (chmp->chm_resv_blocks_write * chmp->chm_ebh->eb_size);
		if (size > avail) {
			dbg("size: %u > avail: %u\n", size, avail);
			ret = ENOSPC;
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}

		mutex_exit(&chmp->chm_lock_sizes);
		ret = chfs_gcollect_pass(chmp);
		mutex_enter(&chmp->chm_lock_sizes);

		if (chmp->chm_nr_erasable_blocks ||
		    !TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue) ||
		    ret == EAGAIN) {
			ret = chfs_remap_leb(chmp);
		}

		if (ret) {
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}
	}

	mutex_exit(&chmp->chm_lock_sizes);
	ret = chfs_reserve_space(chmp, size);
out:
	return ret;
}


/* chfs_reserve_space_gc - tries to reserve space for GC */
int
chfs_reserve_space_gc(struct chfs_mount *chmp, uint32_t size)
{
	int ret;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_remap_leb(chmp);

	if (size > chmp->chm_free_size) {
		dbg("size: %u\n", size);
		mutex_exit(&chmp->chm_lock_sizes);
		return ENOSPC;
	}

	mutex_exit(&chmp->chm_lock_sizes);
	ret = chfs_reserve_space(chmp, size);
	return ret;
}

/*
 * chfs_reserve_space - finds a block whose free size is >= the requested size
 * Returns zero on success, an error code on failure.
 */
int
chfs_reserve_space(struct chfs_mount *chmp, uint32_t size)
{
	//TODO define minimum reserved blocks, which is needed for writing
	//TODO check we have enough free blocks to write
	//TODO if no: need erase and GC

	int err;
	struct chfs_eraseblock *cheb;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	cheb = chmp->chm_nextblock;
	if (cheb && size > cheb->free_size) {
		dbg("size: %u > free_size: %u\n", size, cheb->free_size);
		/*
		 * There isn't enough space on this eraseblock, so we mark the
		 * rest of it as dirty and close the physical chain of the
		 * node refs.
		 */
		// Write out pending data, if any
		if (chmp->chm_wbuf_len) {
			chfs_flush_pending_wbuf(chmp);
			//FIXME need goto restart here?
		}

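		/*
		 * Flush until the write buffer offset reaches the end of the
		 * eraseblock, so nothing is left pending before the block is
		 * closed.
		 */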
		while (chmp->chm_wbuf_ofs < chmp->chm_ebh->eb_size) {
			dbg("wbuf ofs: %zu - eb_size: %zu\n",
			    chmp->chm_wbuf_ofs, chmp->chm_ebh->eb_size);
			chfs_flush_pending_wbuf(chmp);
		}

		if (!(chmp->chm_wbuf_ofs % chmp->chm_ebh->eb_size) && !chmp->chm_wbuf_len)
			chmp->chm_wbuf_ofs = 0xffffffff;

		err = chfs_close_eraseblock(chmp, cheb);
		if (err)
			return err;

		cheb = NULL;
	}
	if (!cheb) {
		//get a block for nextblock
		if (TAILQ_EMPTY(&chmp->chm_free_queue)) {
			// If this succeeds there will be a block on free_queue
			dbg("cheb remap (free: %d)\n", chmp->chm_nr_free_blocks);
			err = chfs_remap_leb(chmp);
			if (err)
				return err;
		}
		cheb = TAILQ_FIRST(&chmp->chm_free_queue);
		TAILQ_REMOVE(&chmp->chm_free_queue, cheb, queue);
		chmp->chm_nextblock = cheb;
		chmp->chm_nr_free_blocks--;
	}

	return 0;
}