/* Home | History | Annotate | Line # | Download | only in chfs */
      1 /*	$NetBSD: chfs_scan.c,v 1.10 2021/07/16 21:18:41 andvar Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2010 Department of Software Engineering,
      5  *		      University of Szeged, Hungary
      6  * Copyright (c) 2010 David Tengeri <dtengeri (at) inf.u-szeged.hu>
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to The NetBSD Foundation
     10  * by the Department of Software Engineering, University of Szeged, Hungary
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     22  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     23  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     24  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     31  * SUCH DAMAGE.
     32  */
     33 
     34 #include "chfs.h"
     35 
     36 /*
     37  * chfs_scan_make_vnode_cache - makes a new vnode cache during scan
     38  * This function returns a vnode cache belonging to @vno.
     39  */
     40 struct chfs_vnode_cache *
     41 chfs_scan_make_vnode_cache(struct chfs_mount *chmp, ino_t vno)
     42 {
     43 	struct chfs_vnode_cache *vc;
     44 
     45 	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));
     46 
     47 	/* vnode cache already exists */
     48 	vc = chfs_vnode_cache_get(chmp, vno);
     49 	if (vc) {
     50 		return vc;
     51 	}
     52 
     53 	/* update max vnode number if needed */
     54 	if (vno > chmp->chm_max_vno) {
     55 		chmp->chm_max_vno = vno;
     56 	}
     57 
     58 	/* create new vnode cache */
     59 	vc = chfs_vnode_cache_alloc(vno);
     60 
     61 	chfs_vnode_cache_add(chmp, vc);
     62 
     63 	if (vno == CHFS_ROOTINO) {
     64 		vc->nlink = 2;
     65 		vc->pvno = CHFS_ROOTINO;
     66 		vc->state = VNO_STATE_CHECKEDABSENT;
     67 	}
     68 
     69 	return vc;
     70 }
     71 
     72 /*
     73  * chfs_scan_check_node_hdr - checks node magic and crc
     74  * Returns 0 if everything is OK, error code otherwise.
     75  */
     76 int
     77 chfs_scan_check_node_hdr(struct chfs_flash_node_hdr *nhdr)
     78 {
     79 	uint16_t magic;
     80 	uint32_t crc, hdr_crc;
     81 
     82 	magic = le16toh(nhdr->magic);
     83 
     84 	if (magic != CHFS_FS_MAGIC_BITMASK) {
     85 		dbg("bad magic\n");
     86 		return CHFS_NODE_BADMAGIC;
     87 	}
     88 
     89 	hdr_crc = le32toh(nhdr->hdr_crc);
     90 	crc = crc32(0, (uint8_t *)nhdr, CHFS_NODE_HDR_SIZE - 4);
     91 
     92 	if (crc != hdr_crc) {
     93 		dbg("bad crc\n");
     94 		return CHFS_NODE_BADCRC;
     95 	}
     96 
     97 	return CHFS_NODE_OK;
     98 }
     99 
    100 /* chfs_scan_check_vnode - check vnode crc and add it to vnode cache */
    101 int
    102 chfs_scan_check_vnode(struct chfs_mount *chmp,
    103     struct chfs_eraseblock *cheb, void *buf, off_t ofs)
    104 {
    105 	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
    106 	struct chfs_vnode_cache *vc;
    107 	struct chfs_flash_vnode *vnode = buf;
    108 	struct chfs_node_ref *nref;
    109 	int err;
    110 	uint32_t crc;
    111 	ino_t vno;
    112 
    113 	crc = crc32(0, (uint8_t *)vnode,
    114 	    sizeof(struct chfs_flash_vnode) - 4);
    115 
    116 	/* check node crc */
    117 	if (crc != le32toh(vnode->node_crc)) {
    118 		err = chfs_update_eb_dirty(chmp,
    119 		    cheb, le32toh(vnode->length));
    120 		if (err) {
    121 			return err;
    122 		}
    123 
    124 		return CHFS_NODE_BADCRC;
    125 	}
    126 
    127 	vno = le64toh(vnode->vno);
    128 
    129 	/* find the corresponding vnode cache */
    130 	mutex_enter(&chmp->chm_lock_vnocache);
    131 	vc = chfs_vnode_cache_get(chmp, vno);
    132 	if (!vc) {
    133 		vc = chfs_scan_make_vnode_cache(chmp, vno);
    134 		if (!vc) {
    135 			mutex_exit(&chmp->chm_lock_vnocache);
    136 			return ENOMEM;
    137 		}
    138 	}
    139 
    140 	nref = chfs_alloc_node_ref(cheb);
    141 
    142 	nref->nref_offset = ofs;
    143 
    144 	KASSERT(nref->nref_lnr == cheb->lnr);
    145 
    146 	/* check version of vnode */
    147 	if ((struct chfs_vnode_cache *)vc->v != vc) {
    148 		if (le64toh(vnode->version) > *vc->vno_version) {
    149 			*vc->vno_version = le64toh(vnode->version);
    150 			chfs_add_vnode_ref_to_vc(chmp, vc, nref);
    151 		} else {
    152 			err = chfs_update_eb_dirty(chmp, cheb,
    153 			    sizeof(struct chfs_flash_vnode));
    154 			mutex_exit(&chmp->chm_lock_vnocache);
    155 			return CHFS_NODE_OK;
    156 		}
    157 	} else {
    158 		vc->vno_version = kmem_alloc(sizeof(uint64_t), KM_SLEEP);
    159 		*vc->vno_version = le64toh(vnode->version);
    160 		chfs_add_vnode_ref_to_vc(chmp, vc, nref);
    161 	}
    162 	mutex_exit(&chmp->chm_lock_vnocache);
    163 
    164 	/* update sizes */
    165 	mutex_enter(&chmp->chm_lock_sizes);
    166 	chfs_change_size_free(chmp, cheb, -le32toh(vnode->length));
    167 	chfs_change_size_used(chmp, cheb, le32toh(vnode->length));
    168 	mutex_exit(&chmp->chm_lock_sizes);
    169 
    170 	KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
    171 
    172 	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
    173 
    174 	return CHFS_NODE_OK;
    175 }
    176 
/*
 * chfs_scan_mark_dirent_obsolete - marks a directory entry "obsolete"
 *
 * Unlinks @fd's node reference from @vc's dirents chain if it is on it.
 * Note the chain is terminated by a pointer back to the vnode cache
 * itself (compared as (void *)vc below), not by NULL.
 * Always returns 0.
 */
int
chfs_scan_mark_dirent_obsolete(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc, struct chfs_dirent *fd)
{
	struct chfs_eraseblock *cheb __diagused;
	struct chfs_node_ref *prev, *nref;

	nref = fd->nref;
	cheb = &chmp->chm_blocks[fd->nref->nref_lnr];

	/* remove dirent's node ref from vnode cache */
	prev = vc->dirents;
	if (prev && prev == nref) {
		/* the ref is the list head: advance the head pointer */
		vc->dirents = prev->nref_next;
	} else if (prev && prev != (void *)vc) {
		/* walk until the back-pointer to vc terminates the chain */
		while (prev->nref_next && prev->nref_next != (void *)vc) {
			if (prev->nref_next == nref) {
				prev->nref_next = nref->nref_next;
				break;
			}
			prev = prev->nref_next;
		}
	}

	/* the eraseblock's space accounting must still add up */
	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);

	return 0;
}
    207 
/*
 * chfs_add_fd_to_list - adds a directory entry to its parent's vnode cache
 *
 * Inserts @new into @pvc's scan_dirents list, which is kept sorted by
 * name hash.  If an entry with the same name already exists, only the
 * higher-versioned one is kept; the loser's flash space is accounted
 * as dirty and the dirent is freed.  On success the new entry's flash
 * footprint moves from free to used space.
 */
void
chfs_add_fd_to_list(struct chfs_mount *chmp,
    struct chfs_dirent *new, struct chfs_vnode_cache *pvc)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	int size;
	struct chfs_eraseblock *cheb, *oldcheb;
	struct chfs_dirent *fd, *tmpfd;

	dbg("adding fd to list: %s\n", new->name);

	/* update highest version if needed */
	if ((new->version > pvc->highest_version))
		pvc->highest_version = new->version;

	/* on-flash footprint of the new dirent (header + name, padded) */
	size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) +
	    new->nsize);
	cheb = &chmp->chm_blocks[new->nref->nref_lnr];

	mutex_enter(&chmp->chm_lock_sizes);
	TAILQ_FOREACH_SAFE(fd, &pvc->scan_dirents, fds, tmpfd) {
		if (fd->nhash > new->nhash) {
			/* insert new before fd to keep hash order */
			TAILQ_INSERT_BEFORE(fd, new, fds);
			goto out;
		} else if (fd->nhash == new->nhash &&
		    !strcmp(fd->name, new->name)) {
			/* same name: keep only the newer version */
			if (new->version > fd->version) {
				/* replace fd with new */
				TAILQ_INSERT_BEFORE(fd, new, fds);
				chfs_change_size_free(chmp, cheb, -size);
				chfs_change_size_used(chmp, cheb, size);

				TAILQ_REMOVE(&pvc->scan_dirents, fd, fds);
				if (fd->nref) {
					/* old entry's space becomes dirty
					 * (size is reused for fd here) */
					size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) + fd->nsize);
					chfs_scan_mark_dirent_obsolete(chmp, pvc, fd);
					oldcheb = &chmp->chm_blocks[fd->nref->nref_lnr];
					chfs_change_size_used(chmp, oldcheb, -size);
					chfs_change_size_dirty(chmp, oldcheb, size);
				}
				chfs_free_dirent(fd);
			} else {
				/* new dirent is older: it is obsolete */
				chfs_scan_mark_dirent_obsolete(chmp, pvc, new);
				chfs_change_size_free(chmp, cheb, -size);
				chfs_change_size_dirty(chmp, cheb, size);
				chfs_free_dirent(new);
			}
			mutex_exit(&chmp->chm_lock_sizes);
			return;
		}
	}
	/* if we couldn't fit it elsewhere, let's add to the end */
	TAILQ_INSERT_TAIL(&pvc->scan_dirents, new, fds);

out:
	/* update sizes */
	chfs_change_size_free(chmp, cheb, -size);
	chfs_change_size_used(chmp, cheb, size);
	mutex_exit(&chmp->chm_lock_sizes);

	KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);

	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
}
    275 
    276 /* chfs_scan_check_dirent_node - check vnode crc and add to vnode cache */
    277 int
    278 chfs_scan_check_dirent_node(struct chfs_mount *chmp,
    279     struct chfs_eraseblock *cheb, void *buf, off_t ofs)
    280 {
    281 	int err, namelen;
    282 	uint32_t crc;
    283 	struct chfs_dirent *fd;
    284 	struct chfs_vnode_cache *parentvc;
    285 	struct chfs_flash_dirent_node *dirent = buf;
    286 
    287 	/* check crc */
    288 	crc = crc32(0, (uint8_t *)dirent, sizeof(*dirent) - 4);
    289 	if (crc != le32toh(dirent->node_crc)) {
    290 		err = chfs_update_eb_dirty(chmp, cheb, le32toh(dirent->length));
    291 		if (err)
    292 			return err;
    293 		return CHFS_NODE_BADCRC;
    294 	}
    295 
    296 	/* allocate space for name */
    297 	namelen = dirent->nsize;
    298 
    299 	fd = chfs_alloc_dirent(namelen + 1);
    300 	if (!fd)
    301 		return ENOMEM;
    302 
    303 	/* allocate an nref */
    304 	fd->nref = chfs_alloc_node_ref(cheb);
    305 	if (!fd->nref)
    306 		return ENOMEM;
    307 
    308 	KASSERT(fd->nref->nref_lnr == cheb->lnr);
    309 
    310 	memcpy(&fd->name, dirent->name, namelen);
    311 	fd->nsize = namelen;
    312 	fd->name[namelen] = 0;
    313 	crc = crc32(0, fd->name, dirent->nsize);
    314 	if (crc != le32toh(dirent->name_crc)) {
    315 		chfs_err("Directory entry's name has bad crc: read: 0x%x, "
    316 		    "calculated: 0x%x\n", le32toh(dirent->name_crc), crc);
    317 		chfs_free_dirent(fd);
    318 		err = chfs_update_eb_dirty(chmp, cheb, le32toh(dirent->length));
    319 		if (err)
    320 			return err;
    321 		return CHFS_NODE_BADNAMECRC;
    322 	}
    323 
    324 	/* check vnode_cache of parent node */
    325 	mutex_enter(&chmp->chm_lock_vnocache);
    326 	parentvc = chfs_scan_make_vnode_cache(chmp, le64toh(dirent->pvno));
    327 	if (!parentvc) {
    328 		chfs_free_dirent(fd);
    329 		mutex_exit(&chmp->chm_lock_vnocache);
    330 		return ENOMEM;
    331 	}
    332 
    333 	fd->nref->nref_offset = ofs;
    334 
    335 	dbg("add dirent to #%llu\n", (unsigned long long)parentvc->vno);
    336 	chfs_add_node_to_list(chmp, parentvc, fd->nref, &parentvc->dirents);
    337 	mutex_exit(&chmp->chm_lock_vnocache);
    338 
    339 	fd->vno = le64toh(dirent->vno);
    340 	fd->version = le64toh(dirent->version);
    341 	fd->nhash = hash32_buf(fd->name, namelen, HASH32_BUF_INIT);
    342 	fd->type = dirent->dtype;
    343 
    344 	chfs_add_fd_to_list(chmp, fd, parentvc);
    345 
    346 	return CHFS_NODE_OK;
    347 }
    348 
    349 /* chfs_scan_check_data_node - check vnode crc and add to vnode cache */
    350 int
    351 chfs_scan_check_data_node(struct chfs_mount *chmp,
    352     struct chfs_eraseblock *cheb, void *buf, off_t ofs)
    353 {
    354 	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
    355 	int err;
    356 	uint32_t crc, vno;
    357 	struct chfs_node_ref *nref;
    358 	struct chfs_vnode_cache *vc;
    359 	struct chfs_flash_data_node *dnode = buf;
    360 
    361 	/* check crc */
    362 	crc = crc32(0, (uint8_t *)dnode, sizeof(struct chfs_flash_data_node) - 4);
    363 	if (crc != le32toh(dnode->node_crc)) {
    364 		err = chfs_update_eb_dirty(chmp, cheb, le32toh(dnode->length));
    365 		if (err)
    366 			return err;
    367 		return CHFS_NODE_BADCRC;
    368 	}
    369 	/*
    370 	 * Don't check data nodes crc and version here, it will be done in
    371 	 * the background GC thread.
    372 	 */
    373 	nref = chfs_alloc_node_ref(cheb);
    374 	if (!nref)
    375 		return ENOMEM;
    376 
    377 	nref->nref_offset = CHFS_GET_OFS(ofs) | CHFS_UNCHECKED_NODE_MASK;
    378 
    379 	KASSERT(nref->nref_lnr == cheb->lnr);
    380 
    381 	vno = le64toh(dnode->vno);
    382 	mutex_enter(&chmp->chm_lock_vnocache);
    383 	vc = chfs_vnode_cache_get(chmp, vno);
    384 	if (!vc) {
    385 		vc = chfs_scan_make_vnode_cache(chmp, vno);
    386 		if (!vc) {
    387 			mutex_exit(&chmp->chm_lock_vnocache);
    388 			return ENOMEM;
    389 		}
    390 	}
    391 	chfs_add_node_to_list(chmp, vc, nref, &vc->dnode);
    392 	mutex_exit(&chmp->chm_lock_vnocache);
    393 
    394 	dbg("chmpfree: %u, chebfree: %u, dnode: %u\n", chmp->chm_free_size, cheb->free_size, dnode->length);
    395 
    396 	/* update sizes */
    397 	mutex_enter(&chmp->chm_lock_sizes);
    398 	chfs_change_size_free(chmp, cheb, -dnode->length);
    399 	chfs_change_size_unchecked(chmp, cheb, dnode->length);
    400 	mutex_exit(&chmp->chm_lock_sizes);
    401 	return CHFS_NODE_OK;
    402 }
    403 
    404 /* chfs_scan_classify_cheb - determine eraseblock's state */
    405 int
    406 chfs_scan_classify_cheb(struct chfs_mount *chmp,
    407     struct chfs_eraseblock *cheb)
    408 {
    409 	if (cheb->free_size == chmp->chm_ebh->eb_size)
    410 		return CHFS_BLK_STATE_FREE;
    411 	else if (cheb->dirty_size < MAX_DIRTY_TO_CLEAN)
    412 		return CHFS_BLK_STATE_CLEAN;
    413 	else if (cheb->used_size || cheb->unchecked_size)
    414 		return CHFS_BLK_STATE_PARTDIRTY;
    415 	else
    416 		return CHFS_BLK_STATE_ALLDIRTY;
    417 }
    418 
    419 
/*
 * chfs_scan_eraseblock - scans an eraseblock and looking for nodes
 *
 * This function scans a whole eraseblock, checks the nodes on it and add them
 * to the vnode cache.
 * Returns eraseblock state on success, error code if fails.
 */
int
chfs_scan_eraseblock(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb)
{
	int err;
	size_t len, retlen;
	off_t ofs = 0;		/* current scan offset within the block */
	int lnr = cheb->lnr;
	u_char *buf;
	struct chfs_flash_node_hdr *nhdr;
	int read_free = 0;	/* consecutive bytes of 0xff seen so far */
	struct chfs_node_ref *nref;

	dbg("scanning eraseblock content: %d free_size: %d\n", cheb->lnr, cheb->free_size);
	dbg("scanned physical block: %d\n", chmp->chm_ebh->lmap[lnr]);
	/* scratch buffer large enough for any single node */
	buf = kmem_alloc(CHFS_MAX_NODE_SIZE, KM_SLEEP);

	/* walk the block header by header until it cannot hold another one */
	while((ofs + CHFS_NODE_HDR_SIZE) < chmp->chm_ebh->eb_size) {
		memset(buf, 0 , CHFS_MAX_NODE_SIZE);
		err = chfs_read_leb(chmp,
		    lnr, buf, ofs, CHFS_NODE_HDR_SIZE, &retlen);
		if (err)
			goto err_return;

		if (retlen != CHFS_NODE_HDR_SIZE) {
			chfs_err("Error reading node header: "
			    "read: %zu instead of: %zu\n",
			    CHFS_NODE_HDR_SIZE, retlen);
			err = EIO;
			goto err_return;
		}

		/* first we check if the buffer we read is full with 0xff, if yes maybe
		 * the blocks remaining area is free. We increase read_free and if it
		 * reaches MAX_READ_FREE we stop reading the block */
		if (check_pattern(buf, 0xff, 0, CHFS_NODE_HDR_SIZE)) {
			read_free += CHFS_NODE_HDR_SIZE;
			if (read_free >= MAX_READ_FREE(chmp)) {
				dbg("rest of the block is free. Size: %d\n", cheb->free_size);
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return chfs_scan_classify_cheb(chmp, cheb);
			}
			ofs += CHFS_NODE_HDR_SIZE;
			continue;
		} else {
			/* the 0xff run ended before MAX_READ_FREE: it was
			 * just erased padding inside the data, count it
			 * as dirty (return value intentionally ignored
			 * here — TODO confirm) */
			chfs_update_eb_dirty(chmp, cheb, read_free);
			read_free = 0;
		}

		nhdr = (struct chfs_flash_node_hdr *)buf;

		/* validate magic + header crc; on failure skip 4 bytes
		 * and resynchronize */
		err = chfs_scan_check_node_hdr(nhdr);
		if (err) {
			dbg("node hdr error\n");
			err = chfs_update_eb_dirty(chmp, cheb, 4);
			if (err)
				goto err_return;

			ofs += 4;
			continue;
		}
		ofs += CHFS_NODE_HDR_SIZE;
		if (ofs > chmp->chm_ebh->eb_size) {
			chfs_err("Second part of node is on the next eraseblock.\n");
			err = EIO;
			goto err_return;
		}
		/* dispatch on the node type; each case reads the node body
		 * after the header and hands the full node (buf, starting
		 * at ofs - CHFS_NODE_HDR_SIZE) to its checker */
		switch (le16toh(nhdr->type)) {
		case CHFS_NODETYPE_VNODE:
		/* vnode information */
			/* read up the node */
			len = le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
			err = chfs_read_leb(chmp,
			    lnr, buf + CHFS_NODE_HDR_SIZE,
			    ofs, len,  &retlen);
			if (err)
				goto err_return;

			if (retlen != len) {
				chfs_err("Error reading vnode: read: %zu instead of: %zu\n",
				    len, retlen);
				err = EIO;
				goto err_return;
			}
			KASSERT(lnr == cheb->lnr);
			err = chfs_scan_check_vnode(chmp,
			    cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
			if (err)
				goto err_return;

			break;
		case CHFS_NODETYPE_DIRENT:
		/* directory entry */
			/* read up the node */
			len = le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;

			err = chfs_read_leb(chmp,
			    lnr, buf + CHFS_NODE_HDR_SIZE,
			    ofs, len, &retlen);
			if (err)
				goto err_return;

			if (retlen != len) {
				chfs_err("Error reading dirent node: read: %zu "
				    "instead of: %zu\n", len, retlen);
				err = EIO;
				goto err_return;
			}

			KASSERT(lnr == cheb->lnr);

			err = chfs_scan_check_dirent_node(chmp,
			    cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
			if (err)
				goto err_return;

			break;
		case CHFS_NODETYPE_DATA:
		/* data node: only the fixed-size head is read here, the
		 * payload is verified later by the GC thread */
			len = sizeof(struct chfs_flash_data_node) -
			    CHFS_NODE_HDR_SIZE;
			err = chfs_read_leb(chmp,
			    lnr, buf + CHFS_NODE_HDR_SIZE,
			    ofs, len, &retlen);
			if (err)
				goto err_return;

			if (retlen != len) {
				chfs_err("Error reading data node: read: %zu "
				    "instead of: %zu\n", len, retlen);
				err = EIO;
				goto err_return;
			}
			KASSERT(lnr == cheb->lnr);
			err = chfs_scan_check_data_node(chmp,
			    cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
			if (err)
				goto err_return;

			break;
		case CHFS_NODETYPE_PADDING:
		/* padding node, set size and update dirty */
			nref = chfs_alloc_node_ref(cheb);
			nref->nref_offset = ofs - CHFS_NODE_HDR_SIZE;
			nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
			    CHFS_OBSOLETE_NODE_MASK;

			err = chfs_update_eb_dirty(chmp, cheb,
			    le32toh(nhdr->length));
			if (err)
				goto err_return;

			break;
		default:
		/* unknown node type, update dirty and skip */
			err = chfs_update_eb_dirty(chmp, cheb,
			    le32toh(nhdr->length));
			if (err)
				goto err_return;

			break;
		}
		/* advance past the node body (header was added above) */
		ofs += le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
	}

	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);

	err = chfs_scan_classify_cheb(chmp, cheb);
	/* FALLTHROUGH */
    err_return:
	kmem_free(buf, CHFS_MAX_NODE_SIZE);
	return err;
}
    601