/*	$NetBSD: chfs_readinode.c,v 1.2.6.2 2012/04/17 00:08:54 yamt Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *                    University of Szeged, Hungary
 * Copyright (C) 2010 David Tengeri <dtengeri (at) inf.u-szeged.hu>
 * Copyright (C) 2010 Tamas Toth <ttoth (at) inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka (at) NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * chfs_readinode.c
 *
 * Created on: 2010.05.31.
 *     Author: dtengeri
 */

#include <sys/buf.h>

#include "chfs.h"

/* tmp node operations */
int chfs_check_td_data(struct chfs_mount *,
    struct chfs_tmp_dnode *);
int chfs_check_td_node(struct chfs_mount *,
    struct chfs_tmp_dnode *);
struct chfs_node_ref *chfs_first_valid_data_ref(struct chfs_node_ref *);
int chfs_add_tmp_dnode_to_tree(struct chfs_mount *,
    struct chfs_readinode_info *,
    struct chfs_tmp_dnode *);
void chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *,
    struct chfs_tmp_dnode *);
void chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *,
    struct chfs_tmp_dnode *);
static void chfs_kill_td(struct chfs_mount *,
    struct chfs_tmp_dnode *);
static void chfs_kill_tdi(struct chfs_mount *,
    struct chfs_tmp_dnode_info *);
/* frag node operations */
struct chfs_node_frag *new_fragment(struct chfs_full_dnode *,
    uint32_t,
    uint32_t);
int no_overlapping_node(struct rb_tree *, struct chfs_node_frag *,
    struct chfs_node_frag *, uint32_t);
int chfs_add_frag_to_fragtree(struct chfs_mount *,
    struct rb_tree *,
    struct chfs_node_frag *);
void chfs_obsolete_node_frag(struct chfs_mount *,
    struct chfs_node_frag *);
/* general node operations */
int chfs_get_data_nodes(struct chfs_mount *,
    struct chfs_inode *,
    struct chfs_readinode_info *);
int chfs_build_fragtree(struct chfs_mount *,
    struct chfs_inode *,
    struct chfs_readinode_info *);



/*
 * --------------------------
 * tmp node rbtree operations
 * --------------------------
 */
static signed int
tmp_node_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const struct chfs_tmp_dnode_info *tdi1 = n1;
	const struct chfs_tmp_dnode_info *tdi2 = n2;

	return (tdi1->tmpnode->node->ofs - tdi2->tmpnode->node->ofs);
}

static signed int
tmp_node_compare_key(void *ctx, const void *n, const void *key)
{
	const struct chfs_tmp_dnode_info *tdi = n;
	uint64_t ofs = *(const uint64_t *)key;

	return (tdi->tmpnode->node->ofs - ofs);
}

const rb_tree_ops_t tmp_node_rbtree_ops = {
	.rbto_compare_nodes = tmp_node_compare_nodes,
	.rbto_compare_key = tmp_node_compare_key,
	.rbto_node_offset = offsetof(struct chfs_tmp_dnode_info, rb_node),
	.rbto_context = NULL
};


/*
 * ---------------------------
 * frag node rbtree operations
 * ---------------------------
 */
static signed int
frag_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const struct chfs_node_frag *frag1 = n1;
	const struct chfs_node_frag *frag2 = n2;

	return (frag1->ofs - frag2->ofs);
}

static signed int
frag_compare_key(void *ctx, const void *n, const void *key)
{
	const struct chfs_node_frag *frag = n;
	uint64_t ofs = *(const uint64_t *)key;

	return (frag->ofs - ofs);
}

const rb_tree_ops_t frag_rbtree_ops = {
	.rbto_compare_nodes = frag_compare_nodes,
	.rbto_compare_key = frag_compare_key,
	.rbto_node_offset = offsetof(struct chfs_node_frag, rb_node),
	.rbto_context = NULL
};
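
/*
 * Illustrative sketch (an assumed helper, not part of this file): both trees
 * above are keyed by a byte offset, so finding the fragment that covers a
 * given file offset is a "less than or equal" lookup followed by a range
 * check, the same pattern chfs_read_data() uses inline further below.
 *
 *	static struct chfs_node_frag *
 *	frag_lookup_sketch(struct rb_tree *fragtree, uint64_t ofs)
 *	{
 *		struct chfs_node_frag *frag;
 *
 *		frag = rb_tree_find_node_leq(fragtree, &ofs);
 *		if (frag && frag->ofs + frag->size > ofs)
 *			return frag;		// fragment covers ofs
 *		return NULL;			// hole or past end of file
 *	}
 */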


/*
 * -------------------
 * tmp node operations
 * -------------------
 */
/*
 * Check the data CRC of the node.
 *
 * Returns: 0 - if everything is OK;
 *          1 - if the CRC is incorrect;
 *          2 - if a read or allocation error occurred.
 */
int
chfs_check_td_data(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
{
	int err;
	size_t retlen, len, totlen;
	uint32_t crc;
	uint64_t ofs;
	char *buf;
	struct chfs_node_ref *nref = td->node->nref;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	ofs = CHFS_GET_OFS(nref->nref_offset) + sizeof(struct chfs_flash_data_node);
	len = td->node->size;
	if (!len)
		return 0;

	buf = kmem_alloc(len, KM_SLEEP);
	if (!buf) {
		dbg("allocating error\n");
		return 2;
	}
	err = chfs_read_leb(chmp, nref->nref_lnr, buf, ofs, len, &retlen);
	if (err) {
		dbg("error while reading: %d\n", err);
		err = 2;
		goto out;
	}

	if (len != retlen) {
		dbg("len:%zu, retlen:%zu\n", len, retlen);
		err = 2;
		goto out;
	}
	crc = crc32(0, (uint8_t *)buf, len);

	if (crc != td->data_crc) {
		dbg("crc failed, calculated: 0x%x, orig: 0x%x\n", crc, td->data_crc);
		kmem_free(buf, len);
		return 1;
	}

	nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) | CHFS_NORMAL_NODE_MASK;
	totlen = CHFS_PAD(sizeof(struct chfs_flash_data_node) + len);

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_unchecked(chmp, &chmp->chm_blocks[nref->nref_lnr], -totlen);
	chfs_change_size_used(chmp, &chmp->chm_blocks[nref->nref_lnr], totlen);
	mutex_exit(&chmp->chm_lock_sizes);
	KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);

	err = 0;
out:
	kmem_free(buf, len);
	return err;
}

int
chfs_check_td_node(struct chfs_mount *chmp, struct chfs_tmp_dnode *td)
{
	int ret;

	if (CHFS_REF_FLAGS(td->node->nref) != CHFS_UNCHECKED_NODE_MASK)
		return 0;

	ret = chfs_check_td_data(chmp, td);
	if (ret == 1) {
		chfs_mark_node_obsolete(chmp, td->node->nref);
	}
	return ret;
}


struct chfs_node_ref *
chfs_first_valid_data_ref(struct chfs_node_ref *nref)
{
	while (nref) {
		if (!CHFS_REF_OBSOLETE(nref)) {
#ifdef DGB_MSG_GC
			if (nref->nref_lnr == REF_EMPTY_NODE) {
				dbg("FIRST VALID IS EMPTY!\n");
			}
#endif
			return nref;
		}

		if (nref->nref_next) {
			nref = nref->nref_next;
		} else
			break;
	}
	return NULL;
}

void
chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *tdi,
    struct chfs_tmp_dnode *td)
{
	if (!tdi->tmpnode) {
		tdi->tmpnode = td;
	} else {
		struct chfs_tmp_dnode *tmp = tdi->tmpnode;
		while (tmp->next) {
			tmp = tmp->next;
		}
		tmp->next = td;
	}
}

void
chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *tdi,
    struct chfs_tmp_dnode *td)
{
	if (tdi->tmpnode == td) {
		tdi->tmpnode = tdi->tmpnode->next;
	} else {
		struct chfs_tmp_dnode *tmp = tdi->tmpnode->next;
		while (tmp->next && tmp->next != td) {
			tmp = tmp->next;
		}
		if (tmp->next) {
			tmp->next = td->next;
		}
	}
}

static void
chfs_kill_td(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
{
	/* check if we need to mark as obsolete, to avoid double mark */
	if (!CHFS_REF_OBSOLETE(td->node->nref)) {
		chfs_mark_node_obsolete(chmp, td->node->nref);
	}

	chfs_free_tmp_dnode(td);
}

static void
chfs_kill_tdi(struct chfs_mount *chmp,
    struct chfs_tmp_dnode_info *tdi)
{
	struct chfs_tmp_dnode *next, *tmp = tdi->tmpnode;

	while (tmp) {
		next = tmp->next;
		chfs_kill_td(chmp, tmp);
		tmp = next;
	}

	chfs_free_tmp_dnode_info(tdi);
}

int
chfs_add_tmp_dnode_to_tree(struct chfs_mount *chmp,
    struct chfs_readinode_info *rii,
    struct chfs_tmp_dnode *newtd)
{
	uint64_t end_ofs = newtd->node->ofs + newtd->node->size;
	struct chfs_tmp_dnode_info *this;
	struct rb_node *node, *prev_node;
	struct chfs_tmp_dnode_info *newtdi;

	node = rb_tree_find_node(&rii->tdi_root, &newtd->node->ofs);
	if (node) {
		this = (struct chfs_tmp_dnode_info *)node;
		while (this->tmpnode->overlapped) {
			prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
			if (!prev_node) {
				this->tmpnode->overlapped = 0;
				break;
			}
			node = prev_node;
			this = (struct chfs_tmp_dnode_info *)node;
		}
	}
	while (node) {
		this = (struct chfs_tmp_dnode_info *)node;
		if (this->tmpnode->node->ofs > end_ofs)
			break;

		struct chfs_tmp_dnode *tmp_td = this->tmpnode;
		while (tmp_td) {
			if (tmp_td->version == newtd->version) {
				if (!chfs_check_td_node(chmp, tmp_td)) {
					dbg("calling kill td 0\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				} else {
					chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
					chfs_kill_td(chmp, tmp_td);
					chfs_add_tmp_dnode_to_tdi(this, newtd);
					return 0;
				}
			}
			if (tmp_td->version < newtd->version &&
			    tmp_td->node->ofs >= newtd->node->ofs &&
			    tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
				/* New node entirely overlaps 'this' */
				if (chfs_check_td_node(chmp, newtd)) {
					dbg("calling kill td 2\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				}
				/* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */
				while (tmp_td && tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
					struct rb_node *next = rb_tree_iterate(&rii->tdi_root, this, RB_DIR_RIGHT);
					struct chfs_tmp_dnode_info *next_tdi = (struct chfs_tmp_dnode_info *)next;
					struct chfs_tmp_dnode *next_td = NULL;
					if (tmp_td->next) {
						next_td = tmp_td->next;
					} else if (next_tdi) {
						next_td = next_tdi->tmpnode;
					}
					if (tmp_td->version < newtd->version) {
						chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
						chfs_kill_td(chmp, tmp_td);
						if (!this->tmpnode) {
							rb_tree_remove_node(&rii->tdi_root, this);
							chfs_kill_tdi(chmp, this);
							this = next_tdi;
						}
					}
					tmp_td = next_td;
				}
				continue;
			}
			if (tmp_td->version > newtd->version &&
			    tmp_td->node->ofs <= newtd->node->ofs &&
			    tmp_td->node->ofs + tmp_td->node->size >= end_ofs) {
				/* New node entirely overlapped by 'this' */
				if (!chfs_check_td_node(chmp, tmp_td)) {
					dbg("this version: %llu\n",
					    (unsigned long long)tmp_td->version);
					dbg("this ofs: %llu, size: %u\n",
					    (unsigned long long)tmp_td->node->ofs,
					    tmp_td->node->size);
					dbg("calling kill td 4\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				}
				/* ... but 'this' was bad. Replace it... */
				chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
				chfs_kill_td(chmp, tmp_td);
				if (!this->tmpnode) {
					rb_tree_remove_node(&rii->tdi_root, this);
					chfs_kill_tdi(chmp, this);
				}
				dbg("calling kill td 5\n");
				chfs_kill_td(chmp, newtd);
				break;
			}
			tmp_td = tmp_td->next;
		}
		node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
	}

	newtdi = chfs_alloc_tmp_dnode_info();
	chfs_add_tmp_dnode_to_tdi(newtdi, newtd);
	/* We neither completely obsoleted nor were completely
	   obsoleted by an earlier node. Insert into the tree */
	struct chfs_tmp_dnode_info *tmp_tdi = rb_tree_insert_node(&rii->tdi_root, newtdi);
	if (tmp_tdi != newtdi) {
		chfs_add_tmp_dnode_to_tdi(tmp_tdi, newtd);
		newtdi->tmpnode = NULL;
		chfs_kill_tdi(chmp, newtdi);
	}

	/* If there's anything behind that overlaps us, note it */
	node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
	if (node) {
		while (1) {
			this = (struct chfs_tmp_dnode_info *)node;
			if (this->tmpnode->node->ofs + this->tmpnode->node->size > newtd->node->ofs) {
				newtd->overlapped = 1;
			}
			if (!this->tmpnode->overlapped)
				break;

			prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
			if (!prev_node) {
				this->tmpnode->overlapped = 0;
				break;
			}
			node = prev_node;
		}
	}

	/* If the new node overlaps anything ahead, note it */
	node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
	this = (struct chfs_tmp_dnode_info *)node;
	while (this && this->tmpnode->node->ofs < end_ofs) {
		this->tmpnode->overlapped = 1;
		node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
		this = (struct chfs_tmp_dnode_info *)node;
	}
	return 0;
}
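
/*
 * Worked example (editorial sketch, not taken from the original sources):
 * suppose the tree holds a checked node A, version 2, covering [0, 4096),
 * and a node B, version 5, covering [0, 4096) arrives.  B is newer and
 * completely covers A, so once B's data CRC checks out, A is removed from
 * its tdi and marked obsolete.  If B instead covered only [2048, 3072),
 * neither node obsoletes the other here: B is inserted on its own, flagged
 * 'overlapped', and the conflict is resolved later by chfs_build_fragtree()
 * via chfs_add_full_dnode_to_inode().  All offsets and version numbers are
 * made up for illustration.
 */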


/*
 * --------------------
 * frag node operations
 * --------------------
 */
struct chfs_node_frag *
new_fragment(struct chfs_full_dnode *fdn, uint32_t ofs, uint32_t size)
{
	struct chfs_node_frag *newfrag;
	newfrag = chfs_alloc_node_frag();
	if (newfrag) {
		newfrag->ofs = ofs;
		newfrag->size = size;
		newfrag->node = fdn;
	} else {
		chfs_err("cannot allocate a chfs_node_frag object\n");
	}
	return newfrag;
}

int
no_overlapping_node(struct rb_tree *fragtree,
    struct chfs_node_frag *newfrag,
    struct chfs_node_frag *this, uint32_t lastend)
{
	if (lastend < newfrag->node->ofs) {
		struct chfs_node_frag *holefrag;

		holefrag = new_fragment(NULL, lastend, newfrag->node->ofs - lastend);
		if (!holefrag) {
			chfs_free_node_frag(newfrag);
			return ENOMEM;
		}

		rb_tree_insert_node(fragtree, holefrag);
		this = holefrag;
	}

	rb_tree_insert_node(fragtree, newfrag);

	return 0;
}

int
chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
    struct rb_tree *fragtree,
    struct chfs_node_frag *newfrag)
{
	struct chfs_node_frag *this;
	uint32_t lastend;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	this = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &newfrag->ofs);

	if (this) {
		lastend = this->ofs + this->size;
	} else {
		lastend = 0;
	}

	if (lastend <= newfrag->ofs) {
		//dbg("no overlapping node\n");
		if (lastend && (lastend - 1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
			if (this->node)
				CHFS_MARK_REF_NORMAL(this->node->nref);
			CHFS_MARK_REF_NORMAL(newfrag->node->nref);
		}
		return no_overlapping_node(fragtree, newfrag, this, lastend);
	}

	if (newfrag->ofs > this->ofs) {

		CHFS_MARK_REF_NORMAL(newfrag->node->nref);
		if (this->node)
			CHFS_MARK_REF_NORMAL(this->node->nref);

		if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
			/* newfrag is inside of this */
			//dbg("newfrag is inside of this\n");
			struct chfs_node_frag *newfrag2;

			newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
			    this->ofs + this->size - newfrag->ofs - newfrag->size);
			if (!newfrag2)
				return ENOMEM;
			if (this->node)
				this->node->frags++;

			this->size = newfrag->ofs - this->ofs;

			rb_tree_insert_node(fragtree, newfrag);
			rb_tree_insert_node(fragtree, newfrag2);

			return 0;
		}
		/* newfrag overlaps the tail of this */
		//dbg("newfrag overlaps the tail of this\n");
		this->size = newfrag->ofs - this->ofs;
		rb_tree_insert_node(fragtree, newfrag);
	} else {
		/* newfrag starts at the same point */
		//dbg("newfrag starts at the same point\n");
		//TODO replace instead of remove and insert
		rb_tree_remove_node(fragtree, this);
		rb_tree_insert_node(fragtree, newfrag);

		if (newfrag->ofs + newfrag->size >= this->ofs + this->size) {
			chfs_obsolete_node_frag(chmp, this);
		} else {
			this->ofs += newfrag->size;
			this->size -= newfrag->size;

			rb_tree_insert_node(fragtree, this);
			return 0;
		}
	}
	/* OK, now we have newfrag added in the correct place in the tree, but
	   frag_next(newfrag) may be a fragment which is overlapped by it
	 */
	while ((this = frag_next(fragtree, newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
		rb_tree_remove_node(fragtree, this);
		chfs_obsolete_node_frag(chmp, this);
	}

	if (!this || newfrag->ofs + newfrag->size == this->ofs)
		return 0;

	this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
	this->ofs = newfrag->ofs + newfrag->size;

	if (this->node)
		CHFS_MARK_REF_NORMAL(this->node->nref);
	CHFS_MARK_REF_NORMAL(newfrag->node->nref);

	return 0;
}
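
/*
 * Worked example (editorial sketch): if the fragtree holds one fragment
 * covering [0, 4096) and a new fragment for [1024, 2048) is added, the
 * "newfrag is inside of this" branch above splits the range into three
 * fragments: [0, 1024) still backed by the old node, [1024, 2048) backed
 * by the new node, and [2048, 4096) backed by the old node again (via
 * newfrag2).  The old node's frag count is bumped because it now backs two
 * fragments.  The offsets are invented purely for illustration.
 */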

void
chfs_kill_fragtree(struct rb_tree *fragtree)
{
	struct chfs_node_frag *this, *next;
	//dbg("start\n");

	this = (struct chfs_node_frag *)RB_TREE_MIN(fragtree);
	while (this) {
		//for (this = (struct chfs_node_frag *)RB_TREE_MIN(&fragtree); this != NULL; this = (struct chfs_node_frag *)rb_tree_iterate(&fragtree, &this->rb_node, RB_DIR_RIGHT)) {
		next = frag_next(fragtree, this);
		rb_tree_remove_node(fragtree, this);
		chfs_free_node_frag(this);
		//dbg("one frag killed\n");
		this = next;
	}
	//dbg("end\n");
}

uint32_t
chfs_truncate_fragtree(struct chfs_mount *chmp,
    struct rb_tree *fragtree, uint32_t size)
{
	struct chfs_node_frag *frag;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	dbg("truncate to size: %u\n", size);

	frag = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &size);

	/* Find the last frag before size and set its new size. */
	if (frag && frag->ofs != size) {
		if (frag->ofs + frag->size > size) {
			frag->size = size - frag->ofs;
		}
		frag = frag_next(fragtree, frag);
	}

	/* Delete frags after new size. */
	while (frag && frag->ofs >= size) {
		struct chfs_node_frag *next = frag_next(fragtree, frag);

		rb_tree_remove_node(fragtree, frag);
		chfs_obsolete_node_frag(chmp, frag);
		frag = next;
	}

	if (size == 0) {
		return 0;
	}

	frag = frag_last(fragtree);

	if (!frag) {
		return 0;
	}

	if (frag->ofs + frag->size < size) {
		return frag->ofs + frag->size;
	}

	/* FIXME Should we check the position of the last node? (PAGE_CACHE size, etc.) */
	if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
		frag->node->nref->nref_offset = CHFS_GET_OFS(frag->node->nref->nref_offset) | CHFS_PRISTINE_NODE_MASK;
	}

	return size;
}

void
chfs_obsolete_node_frag(struct chfs_mount *chmp,
    struct chfs_node_frag *this)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	if (this->node) {
		this->node->frags--;
		if (!this->node->frags) {
			struct chfs_vnode_cache *vc = chfs_nref_to_vc(this->node->nref);
			chfs_mark_node_obsolete(chmp, this->node->nref);

			if (vc->dnode == this->node->nref) {
				vc->dnode = this->node->nref->nref_next;
			} else {
				struct chfs_node_ref *tmp = vc->dnode;
				while (tmp->nref_next != (struct chfs_node_ref *)vc
				    && tmp->nref_next != this->node->nref) {
					tmp = tmp->nref_next;
				}
				if (tmp->nref_next == this->node->nref) {
					tmp->nref_next = this->node->nref->nref_next;
				}
				// FIXME: should we free this->node->nref here?
			}

			chfs_free_full_dnode(this->node);
		} else {
			CHFS_MARK_REF_NORMAL(this->node->nref);
		}
	}
	chfs_free_node_frag(this);
}

int
chfs_add_full_dnode_to_inode(struct chfs_mount *chmp,
    struct chfs_inode *ip,
    struct chfs_full_dnode *fd)
{
	int ret;
	struct chfs_node_frag *newfrag;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	if (unlikely(!fd->size))
		return 0;

	newfrag = new_fragment(fd, fd->ofs, fd->size);
	if (unlikely(!newfrag))
		return ENOMEM;

	newfrag->node->frags = 1;

	ret = chfs_add_frag_to_fragtree(chmp, &ip->fragtree, newfrag);
	if (ret)
		return ret;

	if (newfrag->ofs & (PAGE_SIZE - 1)) {
		struct chfs_node_frag *prev = frag_prev(&ip->fragtree, newfrag);

		CHFS_MARK_REF_NORMAL(fd->nref);
		if (prev->node)
			CHFS_MARK_REF_NORMAL(prev->node->nref);
	}

	if ((newfrag->ofs + newfrag->size) & (PAGE_SIZE - 1)) {
		struct chfs_node_frag *next = frag_next(&ip->fragtree, newfrag);

		if (next) {
			CHFS_MARK_REF_NORMAL(fd->nref);
			if (next->node)
				CHFS_MARK_REF_NORMAL(next->node->nref);
		}
	}

	return 0;
}


/*
 * -----------------------
 * general node operations
 * -----------------------
 */
/* get tmp nodes of an inode */
int
chfs_get_data_nodes(struct chfs_mount *chmp,
    struct chfs_inode *ip,
    struct chfs_readinode_info *rii)
{
	uint32_t crc;
	int err;
	size_t len, retlen;
	struct chfs_node_ref *nref;
	struct chfs_flash_data_node *dnode;
	struct chfs_tmp_dnode *td;
	char *buf;

	len = sizeof(struct chfs_flash_data_node);
	buf = kmem_alloc(len, KM_SLEEP);

	nref = chfs_first_valid_data_ref(ip->chvc->dnode);

	rii->highest_version = ip->chvc->highest_version;

	while (nref && (struct chfs_vnode_cache *)nref != ip->chvc) {
		err = chfs_read_leb(chmp, nref->nref_lnr, buf, CHFS_GET_OFS(nref->nref_offset), len, &retlen);
		if (err || len != retlen) {
			if (!err)
				err = EIO;
			goto out;
		}
		dnode = (struct chfs_flash_data_node *)buf;

		//check header crc
		crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
		if (crc != le32toh(dnode->hdr_crc)) {
			chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->hdr_crc));
			goto cont;
		}
		//check header magic bitmask
		if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
			chfs_err("Wrong magic bitmask.\n");
			goto cont;
		}
		//check node crc
		crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
		if (crc != le32toh(dnode->node_crc)) {
			chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->node_crc));
			goto cont;
		}
		td = chfs_alloc_tmp_dnode();
		if (!td) {
			chfs_err("Can't allocate tmp dnode info.\n");
			err = ENOMEM;
			goto out;
		}
		/* We don't check the data CRC here; we just add the nodes to the
		 * tmp frag tree, because we don't want to check nodes that have
		 * been overlapped by a newer node with a higher version number.
		 */
		td->node = chfs_alloc_full_dnode();
		if (!td->node) {
			chfs_err("Can't allocate full dnode info.\n");
			err = ENOMEM;
			goto out_tmp_dnode;
		}
		td->version = le64toh(dnode->version);
		td->node->ofs = le64toh(dnode->offset);
		td->data_crc = le32toh(dnode->data_crc);
		td->node->nref = nref;
		td->node->size = le32toh(dnode->data_length);
		td->overlapped = 0;

		if (td->version > rii->highest_version) {
			rii->highest_version = td->version;
		}

		err = chfs_add_tmp_dnode_to_tree(chmp, rii, td);
		if (err)
			goto out_full_dnode;

cont:
		nref = chfs_first_valid_data_ref(nref->nref_next);
	}

	ip->chvc->highest_version = rii->highest_version;
	kmem_free(buf, len);
	return 0;

	/* Exit points */
out_full_dnode:
	chfs_free_full_dnode(td->node);
out_tmp_dnode:
	chfs_free_tmp_dnode(td);
out:
	kmem_free(buf, len);
	return err;
}


/* Build final normal fragtree from tdi tree. */
int
chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
    struct chfs_readinode_info *rii)
{
	struct chfs_tmp_dnode_info *pen, *last, *this;
	struct rb_tree ver_tree;	/* version tree */
	uint64_t high_ver = 0;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	rb_tree_init(&ver_tree, &tmp_node_rbtree_ops);

	if (rii->mdata_tn) {
		high_ver = rii->mdata_tn->tmpnode->version;
		rii->latest_ref = rii->mdata_tn->tmpnode->node->nref;
	}

	pen = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&rii->tdi_root);

	while ((last = pen)) {
		pen = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&rii->tdi_root, last, RB_DIR_LEFT);

		rb_tree_remove_node(&rii->tdi_root, last);
		rb_tree_insert_node(&ver_tree, last);

		if (last->tmpnode->overlapped) {
			if (pen)
				continue;

			last->tmpnode->overlapped = 0;
		}

		this = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&ver_tree);

		while (this) {
			struct chfs_tmp_dnode_info *vers_next;
			int ret;

			vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
			rb_tree_remove_node(&ver_tree, this);

			struct chfs_tmp_dnode *tmp_td = this->tmpnode;
			while (tmp_td) {
				struct chfs_tmp_dnode *next_td = tmp_td->next;

				if (chfs_check_td_node(chmp, tmp_td)) {
					if (next_td) {
						chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
					} else {
						break;
					}
				} else {
					if (tmp_td->version > high_ver) {
						high_ver = tmp_td->version;
						dbg("highver: %llu\n", (unsigned long long)high_ver);
						rii->latest_ref = tmp_td->node->nref;
					}

					ret = chfs_add_full_dnode_to_inode(chmp, ip, tmp_td->node);
					if (ret) {
						while (1) {
							vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
							while (tmp_td) {
								next_td = tmp_td->next;
								if (chfs_check_td_node(chmp, tmp_td) > 1) {
									chfs_mark_node_obsolete(chmp,
									    tmp_td->node->nref);
								}
								chfs_free_full_dnode(tmp_td->node);
								chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
								chfs_free_tmp_dnode(tmp_td);
								tmp_td = next_td;
							}
							chfs_free_tmp_dnode_info(this);
							this = vers_next;
							if (!this)
								break;
							rb_tree_remove_node(&ver_tree, vers_next);
						}
						return ret;
					}

					chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
					chfs_free_tmp_dnode(tmp_td);
				}
				tmp_td = next_td;
			}
			chfs_kill_tdi(chmp, this);
			this = vers_next;
		}
	}

	return 0;
}

int
chfs_read_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
{
	struct chfs_vnode_cache *vc = ip->chvc;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

retry:
	/* XXX locking */
	//mutex_enter(&chmp->chm_lock_vnocache);
	switch (vc->state) {
	case VNO_STATE_UNCHECKED:
	case VNO_STATE_CHECKEDABSENT:
		//chfs_vnode_cache_set_state(chmp, vc, VNO_STATE_READING);
		vc->state = VNO_STATE_READING;
		break;
	case VNO_STATE_CHECKING:
	case VNO_STATE_GC:
		//sleep_on_spinunlock(&chmp->chm_lock_vnocache);
		//KASSERT(!mutex_owned(&chmp->chm_lock_vnocache));
		goto retry;
		break;
	case VNO_STATE_PRESENT:
	case VNO_STATE_READING:
		chfs_err("Reading inode #%llu in state %d!\n",
		    (unsigned long long)vc->vno, vc->state);
		chfs_err("wants to read a nonexistent ino %llu\n",
		    (unsigned long long)vc->vno);
		return ENOENT;
	default:
		panic("BUG() Bad vno cache state.");
	}
	//mutex_exit(&chmp->chm_lock_vnocache);

	return chfs_read_inode_internal(chmp, ip);
}

/*
 * Read inode frags.
 * First get the temporary nodes, then build the fragtree from them.
 */
int
chfs_read_inode_internal(struct chfs_mount *chmp, struct chfs_inode *ip)
{
	int err;
	size_t len, retlen;
	char *buf;
	struct chfs_readinode_info rii;
	struct chfs_flash_vnode *fvnode;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	len = sizeof(*fvnode);

	memset(&rii, 0, sizeof(rii));

	rb_tree_init(&rii.tdi_root, &tmp_node_rbtree_ops);

	/* build up a temp node frag tree */
	err = chfs_get_data_nodes(chmp, ip, &rii);
	if (err) {
		if (ip->chvc->state == VNO_STATE_READING)
			ip->chvc->state = VNO_STATE_CHECKEDABSENT;
		/* FIXME Should we kill fragtree or something here? */
		return err;
	}

	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);
	/*
	 * build fragtree from temp nodes
	 */
	err = chfs_build_fragtree(chmp, ip, &rii);
	if (err) {
		if (ip->chvc->state == VNO_STATE_READING)
			ip->chvc->state = VNO_STATE_CHECKEDABSENT;
		/* FIXME Should we kill fragtree or something here? */
		return err;
	}

	if (!rii.latest_ref) {
		return 0;
	}

	buf = kmem_alloc(len, KM_SLEEP);
	if (!buf)
		return ENOMEM;

	/*
	 * set inode size from chvc->v
	 */
	err = chfs_read_leb(chmp, ip->chvc->v->nref_lnr, buf, CHFS_GET_OFS(ip->chvc->v->nref_offset), len, &retlen);
	if (err || retlen != len) {
		kmem_free(buf, len);
		return err ? err : EIO;
	}

	fvnode = (struct chfs_flash_vnode *)buf;

	dbg("set size from v: %u\n", fvnode->dn_size);
	chfs_set_vnode_size(ITOV(ip), fvnode->dn_size);
	uint32_t retsize = chfs_truncate_fragtree(chmp, &ip->fragtree, fvnode->dn_size);
	if (retsize != fvnode->dn_size) {
		dbg("Truncating failed. It is %u instead of %u\n", retsize, fvnode->dn_size);
	}

	kmem_free(buf, len);

	if (ip->chvc->state == VNO_STATE_READING) {
		ip->chvc->state = VNO_STATE_PRESENT;
	}

	return 0;
}

int
chfs_read_data(struct chfs_mount *chmp, struct vnode *vp,
    struct buf *bp)
{
	off_t ofs;
	struct chfs_node_frag *frag;
	char *buf;
	int err = 0;
	size_t size, retlen;
	uint32_t crc;
	struct chfs_inode *ip = VTOI(vp);
	struct chfs_flash_data_node *dnode;
	struct chfs_node_ref *nref;

	memset(bp->b_data, 0, bp->b_bcount);

	ofs = bp->b_blkno * PAGE_SIZE;
	frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&ip->fragtree, &ofs);

	if (!frag || frag->ofs > ofs || frag->ofs + frag->size <= ofs) {
		dbg("not found in frag tree\n");
		return 0;
	}

	if (!frag->node) {
		dbg("no node in frag\n");
		return 0;
	}

	nref = frag->node->nref;

	size = sizeof(*dnode) + frag->size;

	buf = kmem_alloc(size, KM_SLEEP);

	dbg("reading from lnr: %u, offset: %u, size: %zu\n", nref->nref_lnr, CHFS_GET_OFS(nref->nref_offset), size);
	err = chfs_read_leb(chmp, nref->nref_lnr, buf, CHFS_GET_OFS(nref->nref_offset), size, &retlen);
	if (err) {
		chfs_err("error after reading: %d\n", err);
		goto out;
	}
	if (retlen != size) {
		chfs_err("retlen: %zu != size: %zu\n", retlen, size);
		err = EIO;
		goto out;
	}

	dnode = (struct chfs_flash_data_node *)buf;
	crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
	if (crc != le32toh(dnode->hdr_crc)) {
		chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->hdr_crc));
		err = EIO;
		goto out;
	}
	//check header magic bitmask
	if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
		chfs_err("Wrong magic bitmask.\n");
		err = EIO;
		goto out;
	}
	//check node crc
	crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
	if (crc != le32toh(dnode->node_crc)) {
		chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->node_crc));
		err = EIO;
		goto out;
	}
	crc = crc32(0, (uint8_t *)dnode->data, dnode->data_length);
	if (crc != le32toh(dnode->data_crc)) {
		chfs_err("Data CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->data_crc));
		err = EIO;
		goto out;
	}

	memcpy(bp->b_data, dnode->data, dnode->data_length);
	bp->b_resid = 0;

out:
	kmem_free(buf, size);
	return err;
}