chfs_gc.c revision 1.2.6.4 1 1.2.6.4 yamt /* $NetBSD: chfs_gc.c,v 1.2.6.4 2014/05/22 11:41:18 yamt Exp $ */
2 1.2.6.2 yamt
3 1.2.6.2 yamt /*-
4 1.2.6.2 yamt * Copyright (c) 2010 Department of Software Engineering,
5 1.2.6.2 yamt * University of Szeged, Hungary
6 1.2.6.2 yamt * Copyright (c) 2010 Tamas Toth <ttoth (at) inf.u-szeged.hu>
7 1.2.6.2 yamt * Copyright (c) 2010 Adam Hoka <ahoka (at) NetBSD.org>
8 1.2.6.2 yamt * All rights reserved.
9 1.2.6.2 yamt *
10 1.2.6.2 yamt * This code is derived from software contributed to The NetBSD Foundation
11 1.2.6.2 yamt * by the Department of Software Engineering, University of Szeged, Hungary
12 1.2.6.2 yamt *
13 1.2.6.2 yamt * Redistribution and use in source and binary forms, with or without
14 1.2.6.2 yamt * modification, are permitted provided that the following conditions
15 1.2.6.2 yamt * are met:
16 1.2.6.2 yamt * 1. Redistributions of source code must retain the above copyright
17 1.2.6.2 yamt * notice, this list of conditions and the following disclaimer.
18 1.2.6.2 yamt * 2. Redistributions in binary form must reproduce the above copyright
19 1.2.6.2 yamt * notice, this list of conditions and the following disclaimer in the
20 1.2.6.2 yamt * documentation and/or other materials provided with the distribution.
21 1.2.6.2 yamt *
22 1.2.6.2 yamt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 1.2.6.2 yamt * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 1.2.6.2 yamt * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 1.2.6.2 yamt * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 1.2.6.2 yamt * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 1.2.6.2 yamt * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 1.2.6.2 yamt * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 1.2.6.2 yamt * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 1.2.6.2 yamt * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 1.2.6.2 yamt * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 1.2.6.2 yamt * SUCH DAMAGE.
33 1.2.6.2 yamt */
34 1.2.6.2 yamt
35 1.2.6.2 yamt #include "chfs.h"
36 1.2.6.2 yamt
/* Forward declarations: inode handling helpers used by the GC. */
void chfs_gc_release_inode(struct chfs_mount *,
    struct chfs_inode *);
struct chfs_inode *chfs_gc_fetch_inode(struct chfs_mount *,
    ino_t, uint32_t);
int chfs_check(struct chfs_mount *, struct chfs_vnode_cache *);
void chfs_clear_inode(struct chfs_mount *, struct chfs_inode *);


/* Forward declarations: block selection and node collection helpers. */
struct chfs_eraseblock *find_gc_block(struct chfs_mount *);
int chfs_gcollect_pristine(struct chfs_mount *,
    struct chfs_eraseblock *,
    struct chfs_vnode_cache *, struct chfs_node_ref *);
int chfs_gcollect_live(struct chfs_mount *,
    struct chfs_eraseblock *, struct chfs_node_ref *,
    struct chfs_inode *);
int chfs_gcollect_vnode(struct chfs_mount *, struct chfs_inode *);
int chfs_gcollect_dirent(struct chfs_mount *,
    struct chfs_eraseblock *, struct chfs_inode *,
    struct chfs_dirent *);
int chfs_gcollect_deletion_dirent(struct chfs_mount *,
    struct chfs_eraseblock *, struct chfs_inode *,
    struct chfs_dirent *);
int chfs_gcollect_dnode(struct chfs_mount *,
    struct chfs_eraseblock *, struct chfs_inode *,
    struct chfs_full_dnode *, uint32_t, uint32_t);
62 1.2.6.2 yamt
63 1.2.6.3 yamt /*
64 1.2.6.3 yamt * chfs_gc_trigger - wakes up GC thread, if it should run
65 1.2.6.3 yamt * Must be called with chm_lock_mountfields held.
66 1.2.6.3 yamt */
67 1.2.6.2 yamt void
68 1.2.6.2 yamt chfs_gc_trigger(struct chfs_mount *chmp)
69 1.2.6.2 yamt {
70 1.2.6.2 yamt struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
71 1.2.6.2 yamt
72 1.2.6.2 yamt if (gc->gcth_running &&
73 1.2.6.2 yamt chfs_gc_thread_should_wake(chmp)) {
74 1.2.6.2 yamt cv_signal(&gc->gcth_wakeup);
75 1.2.6.2 yamt }
76 1.2.6.2 yamt }
77 1.2.6.2 yamt
78 1.2.6.2 yamt
/* chfs_gc_thread - the garbage collector's kernel thread body */
void
chfs_gc_thread(void *data)
{
	struct chfs_mount *chmp = data;
	struct garbage_collector_thread *gc = &chmp->chm_gc_thread;

	dbg_gc("[GC THREAD] thread started\n");

	mutex_enter(&chmp->chm_lock_mountfields);
	while (gc->gcth_running) {
		/* We must call chfs_gc_thread_should_wake() with
		 * chm_lock_mountfields held, which is a bit awkwardly done
		 * here, but we can't really do it any other way with the
		 * current design.
		 */
		if (chfs_gc_thread_should_wake(chmp)) {
			if (chfs_gcollect_pass(chmp) == ENOSPC) {
				mutex_exit(&chmp->chm_lock_mountfields);
				panic("No space for garbage collection\n");
				/* XXX why break here? i have added a panic
				 * here to see if it gets triggered -ahoka
				 */
				/* NOTE(review): this break is unreachable
				 * after panic(). */
				break;
			}
			/* XXX gcollect_pass drops the mutex */
		}

		/* Sleep until chfs_gc_trigger() signals us, or for at most
		 * 100 ms; the running flag is re-checked on each wakeup. */
		cv_timedwait_sig(&gc->gcth_wakeup,
		    &chmp->chm_lock_mountfields, mstohz(100));
	}
	mutex_exit(&chmp->chm_lock_mountfields);

	dbg_gc("[GC THREAD] thread stopped\n");
	kthread_exit(0);
}
114 1.2.6.2 yamt
115 1.2.6.3 yamt /* chfs_gc_thread_start - starts GC */
116 1.2.6.2 yamt void
117 1.2.6.2 yamt chfs_gc_thread_start(struct chfs_mount *chmp)
118 1.2.6.2 yamt {
119 1.2.6.2 yamt struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
120 1.2.6.2 yamt
121 1.2.6.2 yamt cv_init(&gc->gcth_wakeup, "chfsgccv");
122 1.2.6.2 yamt
123 1.2.6.2 yamt gc->gcth_running = true;
124 1.2.6.2 yamt kthread_create(PRI_NONE, /*KTHREAD_MPSAFE |*/ KTHREAD_MUSTJOIN,
125 1.2.6.2 yamt NULL, chfs_gc_thread, chmp, &gc->gcth_thread,
126 1.2.6.2 yamt "chfsgcth");
127 1.2.6.2 yamt }
128 1.2.6.2 yamt
129 1.2.6.3 yamt /* chfs_gc_thread_start - stops GC */
130 1.2.6.2 yamt void
131 1.2.6.2 yamt chfs_gc_thread_stop(struct chfs_mount *chmp)
132 1.2.6.2 yamt {
133 1.2.6.2 yamt struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
134 1.2.6.2 yamt
135 1.2.6.3 yamt /* check if it is actually running */
136 1.2.6.2 yamt if (gc->gcth_running) {
137 1.2.6.2 yamt gc->gcth_running = false;
138 1.2.6.2 yamt } else {
139 1.2.6.2 yamt return;
140 1.2.6.2 yamt }
141 1.2.6.2 yamt cv_signal(&gc->gcth_wakeup);
142 1.2.6.2 yamt dbg_gc("[GC THREAD] stop signal sent\n");
143 1.2.6.2 yamt
144 1.2.6.2 yamt kthread_join(gc->gcth_thread);
145 1.2.6.2 yamt #ifdef BROKEN_KTH_JOIN
146 1.2.6.2 yamt kpause("chfsthjoin", false, mstohz(1000), NULL);
147 1.2.6.2 yamt #endif
148 1.2.6.2 yamt
149 1.2.6.2 yamt cv_destroy(&gc->gcth_wakeup);
150 1.2.6.2 yamt }
151 1.2.6.2 yamt
152 1.2.6.3 yamt /*
153 1.2.6.3 yamt * chfs_gc_thread_should_wake - checks if GC thread should wake up
154 1.2.6.3 yamt * Must be called with chm_lock_mountfields held.
155 1.2.6.3 yamt * Returns 1, if GC should wake up and 0 else.
156 1.2.6.3 yamt */
157 1.2.6.2 yamt int
158 1.2.6.2 yamt chfs_gc_thread_should_wake(struct chfs_mount *chmp)
159 1.2.6.2 yamt {
160 1.2.6.2 yamt int nr_very_dirty = 0;
161 1.2.6.2 yamt struct chfs_eraseblock *cheb;
162 1.2.6.2 yamt uint32_t dirty;
163 1.2.6.2 yamt
164 1.2.6.2 yamt KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
165 1.2.6.2 yamt
166 1.2.6.3 yamt /* Erase pending queue is not empty. */
167 1.2.6.2 yamt if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
168 1.2.6.2 yamt dbg_gc("erase_pending\n");
169 1.2.6.2 yamt return 1;
170 1.2.6.2 yamt }
171 1.2.6.2 yamt
172 1.2.6.3 yamt /* There is something unchecked in the filesystem. */
173 1.2.6.2 yamt if (chmp->chm_unchecked_size) {
174 1.2.6.2 yamt dbg_gc("unchecked\n");
175 1.2.6.2 yamt return 1;
176 1.2.6.2 yamt }
177 1.2.6.2 yamt
178 1.2.6.2 yamt dirty = chmp->chm_dirty_size - chmp->chm_nr_erasable_blocks *
179 1.2.6.2 yamt chmp->chm_ebh->eb_size;
180 1.2.6.2 yamt
181 1.2.6.3 yamt /* Number of free and erasable blocks are critical. */
182 1.2.6.2 yamt if (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks <
183 1.2.6.2 yamt chmp->chm_resv_blocks_gctrigger && (dirty > chmp->chm_nospc_dirty)) {
184 1.2.6.2 yamt dbg_gc("free: %d + erasable: %d < resv: %d\n",
185 1.2.6.2 yamt chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks,
186 1.2.6.2 yamt chmp->chm_resv_blocks_gctrigger);
187 1.2.6.2 yamt dbg_gc("dirty: %d > nospc_dirty: %d\n",
188 1.2.6.2 yamt dirty, chmp->chm_nospc_dirty);
189 1.2.6.2 yamt
190 1.2.6.2 yamt return 1;
191 1.2.6.2 yamt }
192 1.2.6.2 yamt
193 1.2.6.3 yamt /* There is too much very dirty blocks. */
194 1.2.6.2 yamt TAILQ_FOREACH(cheb, &chmp->chm_very_dirty_queue, queue) {
195 1.2.6.2 yamt nr_very_dirty++;
196 1.2.6.2 yamt if (nr_very_dirty == chmp->chm_vdirty_blocks_gctrigger) {
197 1.2.6.2 yamt dbg_gc("nr_very_dirty\n");
198 1.2.6.2 yamt return 1;
199 1.2.6.2 yamt }
200 1.2.6.2 yamt }
201 1.2.6.2 yamt
202 1.2.6.3 yamt /* Everythin OK, GC shouldn't run. */
203 1.2.6.2 yamt return 0;
204 1.2.6.2 yamt }
205 1.2.6.2 yamt
/*
 * chfs_gc_release_inode - does nothing yet
 * Placeholder counterpart of chfs_gc_fetch_inode(); currently only logs.
 */
void
chfs_gc_release_inode(struct chfs_mount *chmp,
    struct chfs_inode *ip)
{
	dbg_gc("release inode\n");
}
213 1.2.6.2 yamt
214 1.2.6.3 yamt /* chfs_gc_fetch_inode - assign the given inode to the GC */
215 1.2.6.2 yamt struct chfs_inode *
216 1.2.6.2 yamt chfs_gc_fetch_inode(struct chfs_mount *chmp, ino_t vno,
217 1.2.6.2 yamt uint32_t unlinked)
218 1.2.6.2 yamt {
219 1.2.6.2 yamt struct vnode *vp = NULL;
220 1.2.6.2 yamt struct chfs_vnode_cache *vc;
221 1.2.6.2 yamt struct chfs_inode *ip;
222 1.2.6.2 yamt dbg_gc("fetch inode %llu\n", (unsigned long long)vno);
223 1.2.6.2 yamt
224 1.2.6.2 yamt if (unlinked) {
225 1.2.6.2 yamt dbg_gc("unlinked\n");
226 1.2.6.2 yamt vp = chfs_vnode_lookup(chmp, vno);
227 1.2.6.2 yamt if (!vp) {
228 1.2.6.2 yamt mutex_enter(&chmp->chm_lock_vnocache);
229 1.2.6.2 yamt vc = chfs_vnode_cache_get(chmp, vno);
230 1.2.6.2 yamt if (!vc) {
231 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_vnocache);
232 1.2.6.2 yamt return NULL;
233 1.2.6.2 yamt }
234 1.2.6.2 yamt if (vc->state != VNO_STATE_CHECKEDABSENT) {
235 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_vnocache);
236 1.2.6.2 yamt /* XXX why do we need the delay here?! */
237 1.2.6.2 yamt KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
238 1.2.6.2 yamt cv_timedwait_sig(
239 1.2.6.2 yamt &chmp->chm_gc_thread.gcth_wakeup,
240 1.2.6.2 yamt &chmp->chm_lock_mountfields, mstohz(50));
241 1.2.6.2 yamt } else {
242 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_vnocache);
243 1.2.6.2 yamt }
244 1.2.6.2 yamt return NULL;
245 1.2.6.2 yamt }
246 1.2.6.2 yamt } else {
247 1.2.6.2 yamt dbg_gc("vnode lookup\n");
248 1.2.6.2 yamt vp = chfs_vnode_lookup(chmp, vno);
249 1.2.6.2 yamt }
250 1.2.6.2 yamt dbg_gc("vp to ip\n");
251 1.2.6.2 yamt ip = VTOI(vp);
252 1.2.6.2 yamt KASSERT(ip);
253 1.2.6.2 yamt
254 1.2.6.2 yamt return ip;
255 1.2.6.2 yamt }
256 1.2.6.2 yamt
257 1.2.6.2 yamt extern rb_tree_ops_t frag_rbtree_ops;
258 1.2.6.2 yamt
/*
 * chfs_check - checks an inode with minimal initialization
 *
 * Builds a throw-away in-memory inode for the vnode cache entry "chvc"
 * by replaying its nodes from flash, then tears it down again via
 * chfs_clear_inode().  Used by the GC to validate the nodes of an
 * unchecked vnode.  Called and returns with chm_lock_vnocache held;
 * the lock is dropped around the flash I/O.  Returns 0 or an errno.
 */
int
chfs_check(struct chfs_mount *chmp, struct chfs_vnode_cache *chvc)
{
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	struct chfs_inode *ip;
	struct vnode *vp;
	int ret;

	/* Get a new inode. */
	ip = pool_get(&chfs_inode_pool, PR_WAITOK);
	if (!ip) {
		/* NOTE(review): PR_WAITOK never returns NULL, so this branch
		 * looks like dead code; harmless, kept as-is. */
		return ENOMEM;
	}

	/* Fake vnode backing the temporary inode. */
	vp = kmem_zalloc(sizeof(struct vnode), KM_SLEEP);

	/* Minimal initialization. */
	ip->chvc = chvc;
	ip->vp = vp;

	vp->v_data = ip;

	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);
	TAILQ_INIT(&ip->dents);

	/* Build the node.  chfs_read_inode_internal() does flash I/O and may
	 * sleep, so drop chm_lock_vnocache around the call. */
	mutex_exit(&chmp->chm_lock_vnocache);
	ret = chfs_read_inode_internal(chmp, ip);
	mutex_enter(&chmp->chm_lock_vnocache);
	if (!ret) {
		chfs_clear_inode(chmp, ip);
	}

	/* Release inode.
	 * NOTE(review): the kmem_zalloc'd "vp" does not appear to be freed
	 * here — possible leak unless it is released inside
	 * chfs_read_inode_internal()/chfs_clear_inode(); verify. */
	pool_put(&chfs_inode_pool, ip);

	return ret;
}
299 1.2.6.2 yamt
/*
 * chfs_clear_inode - kills a minimal inode
 *
 * Tears down an inode built by chfs_check(): obsoletes the vnode
 * information chain when the inode is deleted, destroys the fragtree and
 * the cached dirents, and removes the vnode cache entry once it has
 * become empty.  Caller holds chm_lock_vnocache.
 */
void
chfs_clear_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
{
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	struct chfs_dirent *fd, *tmpfd;
	struct chfs_vnode_cache *chvc;
	struct chfs_node_ref *nref;

	chvc = ip->chvc;
	/* shouldnt this be: */
	//bool deleted = (chvc && !(chvc->pvno || chvc->nlink));
	/* NOTE(review): the two forms are equivalent here — !(a | b) and
	 * !(a || b) are both true exactly when a and b are both zero. */
	int deleted = (chvc && !(chvc->pvno | chvc->nlink));

	/* Set actual state. */
	if (chvc && chvc->state != VNO_STATE_CHECKING) {
		chvc->state = VNO_STATE_CLEARING;
	}

	/* Remove vnode information.  The chain is empty when chvc->v points
	 * back at the cache entry itself. */
	while (deleted && chvc->v != (struct chfs_node_ref *)chvc) {
		nref = chvc->v;
		chfs_remove_and_obsolete(chmp, chvc, nref, &chvc->v);
	}

	/* Destroy data. */
	chfs_kill_fragtree(chmp, &ip->fragtree);

	/* Clear dirents. */
	TAILQ_FOREACH_SAFE(fd, &ip->dents, fds, tmpfd) {
		chfs_free_dirent(fd);
	}

	/* Remove node from vnode cache, but only once all three chains
	 * (v, dirents, dnode) point back at the entry, i.e. are empty. */
	if (chvc && chvc->state == VNO_STATE_CHECKING) {
		chvc->state = VNO_STATE_CHECKEDABSENT;
		if ((struct chfs_vnode_cache *)chvc->v == chvc &&
		    (struct chfs_vnode_cache *)chvc->dirents == chvc &&
		    (struct chfs_vnode_cache *)chvc->dnode == chvc)
			chfs_vnode_cache_remove(chmp, chvc);
	}
}
343 1.2.6.2 yamt
/*
 * find_gc_block - finds the next block for GC
 *
 * Picks an eraseblock queue using a pseudo-random weight (so clean blocks
 * are occasionally collected for wear leveling), takes the first block of
 * that queue, and makes it the current GC block.  Returns NULL when there
 * is nothing to collect.  Caller holds chm_lock_mountfields.
 */
struct chfs_eraseblock *
find_gc_block(struct chfs_mount *chmp)
{
	struct chfs_eraseblock *ret;
	struct chfs_eraseblock_queue *nextqueue;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	/* Get a "random" number from the clock's nanosecond part. */
	struct timespec now;
	vfs_timestamp(&now);

	int n = now.tv_nsec % 128;

again:
	/* Find an eraseblock queue: weights out of 128 favor erase-pending
	 * (n<50), then very dirty (n<110), then dirty (n<126) blocks, with
	 * fallbacks when the preferred queues are empty. */
	if (n<50 && !TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
		dbg_gc("Picking block from erase_pending_queue to GC next\n");
		nextqueue = &chmp->chm_erase_pending_queue;
	} else if (n<110 && !TAILQ_EMPTY(&chmp->chm_very_dirty_queue) ) {
		dbg_gc("Picking block from very_dirty_queue to GC next\n");
		nextqueue = &chmp->chm_very_dirty_queue;
	} else if (n<126 && !TAILQ_EMPTY(&chmp->chm_dirty_queue) ) {
		dbg_gc("Picking block from dirty_queue to GC next\n");
		nextqueue = &chmp->chm_dirty_queue;
	} else if (!TAILQ_EMPTY(&chmp->chm_clean_queue)) {
		dbg_gc("Picking block from clean_queue to GC next\n");
		nextqueue = &chmp->chm_clean_queue;
	} else if (!TAILQ_EMPTY(&chmp->chm_dirty_queue)) {
		dbg_gc("Picking block from dirty_queue to GC next"
		    " (clean_queue was empty)\n");
		nextqueue = &chmp->chm_dirty_queue;
	} else if (!TAILQ_EMPTY(&chmp->chm_very_dirty_queue)) {
		dbg_gc("Picking block from very_dirty_queue to GC next"
		    " (clean_queue and dirty_queue were empty)\n");
		nextqueue = &chmp->chm_very_dirty_queue;
	} else if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
		dbg_gc("Picking block from erase_pending_queue to GC next"
		    " (clean_queue and {very_,}dirty_queue were empty)\n");
		nextqueue = &chmp->chm_erase_pending_queue;
	} else if (!TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue)) {
		/* No collectable block; flush the write buffer so the
		 * erasable-pending blocks become usable, then retry. */
		dbg_gc("Synching wbuf in order to reuse "
		    "erasable_pendig_wbuf_queue blocks\n");
		rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
		chfs_flush_pending_wbuf(chmp);
		rw_exit(&chmp->chm_lock_wbuf);
		goto again;
	} else {
		dbg_gc("CHFS: no clean, dirty _or_ erasable"
		    " blocks to GC from! Where are they all?\n");
		return NULL;
	}

	/* Get the first block of the queue.  Never collect the block that is
	 * currently being written to (chm_nextblock). */
	ret = TAILQ_FIRST(nextqueue);
	if (chmp->chm_nextblock) {
		dbg_gc("nextblock num: %u - gcblock num: %u\n",
		    chmp->chm_nextblock->lnr, ret->lnr);
		if (ret == chmp->chm_nextblock)
			goto again;
	}
	TAILQ_REMOVE(nextqueue, ret, queue);

	/* Set GC block. */
	chmp->chm_gcblock = ret;
	/* Set GC node. */
	ret->gc_node = ret->first_node;

	if (!ret->gc_node) {
		dbg_gc("Oops! ret->gc_node at LEB: %u is NULL\n", ret->lnr);
		panic("CHFS BUG - one LEB's gc_node is NULL\n");
	}

	/* TODO wasted size? */
	return ret;
}
421 1.2.6.2 yamt
/*
 * chfs_gcollect_pass - this is the main function of GC
 *
 * One pass of garbage collection: first checks one unchecked vnode if any
 * remain, otherwise picks (or continues) a GC block, advances to its next
 * non-obsolete node, and collects that node (pristine copy or live
 * collection through the owning inode).  Finally, if the GC block became
 * fully obsolete, queues it for erase.  Called and returns with
 * chm_lock_mountfields held; chm_lock_sizes and chm_lock_vnocache are
 * taken and dropped internally.  Returns 0, or an errno (ENOSPC, EAGAIN,
 * EIO).
 */
int
chfs_gcollect_pass(struct chfs_mount *chmp)
{
	struct chfs_vnode_cache *vc;
	struct chfs_eraseblock *eb;
	struct chfs_node_ref *nref;
	uint32_t gcblock_dirty;
	struct chfs_inode *ip;
	ino_t vno, pvno;
	uint32_t nlink;
	int ret = 0;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	/* Check all vnodes. */
	for (;;) {
		mutex_enter(&chmp->chm_lock_sizes);

		/* Check unchecked size.  Note: when this break is taken,
		 * chm_lock_sizes is deliberately still held; the code after
		 * the loop releases it on every path. */
		dbg_gc("unchecked size == %u\n", chmp->chm_unchecked_size);
		if (!chmp->chm_unchecked_size)
			break;

		/* Compare vnode number to the maximum. */
		if (chmp->chm_checked_vno > chmp->chm_max_vno) {
			mutex_exit(&chmp->chm_lock_sizes);
			dbg_gc("checked_vno (#%llu) > max_vno (#%llu)\n",
			    (unsigned long long)chmp->chm_checked_vno,
			    (unsigned long long)chmp->chm_max_vno);
			return ENOSPC;
		}

		mutex_exit(&chmp->chm_lock_sizes);

		mutex_enter(&chmp->chm_lock_vnocache);
		dbg_gc("checking vno #%llu\n",
		    (unsigned long long)chmp->chm_checked_vno);
		dbg_gc("get vnode cache\n");

		/* OK, Get and check the vnode cache. */
		vc = chfs_vnode_cache_get(chmp, chmp->chm_checked_vno++);

		if (!vc) {
			dbg_gc("!vc\n");
			mutex_exit(&chmp->chm_lock_vnocache);
			continue;
		}

		if ((vc->pvno | vc->nlink) == 0) {
			/* Deleted inode: nothing to check. */
			dbg_gc("(pvno | nlink) == 0\n");
			mutex_exit(&chmp->chm_lock_vnocache);
			continue;
		}

		/* Find out the state of the vnode. */
		dbg_gc("switch\n");
		switch (vc->state) {
		case VNO_STATE_CHECKEDABSENT:
			/* FALLTHROUGH */
		case VNO_STATE_PRESENT:
			mutex_exit(&chmp->chm_lock_vnocache);
			continue;

		case VNO_STATE_GC:
			/* FALLTHROUGH */
		case VNO_STATE_CHECKING:
			mutex_exit(&chmp->chm_lock_vnocache);
			dbg_gc("VNO_STATE GC or CHECKING\n");
			panic("CHFS BUG - vc state gc or checking\n");

		case VNO_STATE_READING:
			/* Vnode is being read in; undo the increment and
			 * back off so the reader can finish. */
			chmp->chm_checked_vno--;
			mutex_exit(&chmp->chm_lock_vnocache);
			/* XXX why do we need the delay here?! */
			kpause("chvncrea", true, mstohz(50), NULL);

			return 0;

		default:
			mutex_exit(&chmp->chm_lock_vnocache);
			dbg_gc("default\n");
			panic("CHFS BUG - vc state is other what we"
			    " checked\n");

		case VNO_STATE_UNCHECKED:
			;
		}

		/* We found an unchecked vnode. */

		vc->state = VNO_STATE_CHECKING;

		/* XXX check if this is too heavy to call under
		 * chm_lock_vnocache
		 */
		ret = chfs_check(chmp, vc);
		vc->state = VNO_STATE_CHECKEDABSENT;

		mutex_exit(&chmp->chm_lock_vnocache);
		return ret;
	}

	/* Get GC block.  chm_lock_sizes is held from the loop break above. */
	eb = chmp->chm_gcblock;

	if (!eb) {
		eb = find_gc_block(chmp);
	}

	if (!eb) {
		dbg_gc("!eb\n");
		if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
			mutex_exit(&chmp->chm_lock_sizes);
			return EAGAIN;
		}
		mutex_exit(&chmp->chm_lock_sizes);
		return EIO;
	}

	if (!eb->used_size) {
		/* Block holds no live data; go straight to erase checks. */
		dbg_gc("!eb->used_size\n");
		goto eraseit;
	}

	/* Get GC node. */
	nref = eb->gc_node;
	gcblock_dirty = eb->dirty_size;

	/* Find a node which wasn't obsoleted yet.
	 * Obsoleted nodes will be simply deleted after the whole block has
	 * been checked. */
	while(CHFS_REF_OBSOLETE(nref)) {
#ifdef DBG_MSG_GC
		if (nref == chmp->chm_blocks[nref->nref_lnr].last_node) {
			dbg_gc("THIS NODE IS THE LAST NODE OF ITS EB\n");
		}
#endif
		nref = node_next(nref);
		if (!nref) {
			eb->gc_node = nref;
			mutex_exit(&chmp->chm_lock_sizes);
			panic("CHFS BUG - nref is NULL)\n");
		}
	}

	/* We found a "not obsoleted" node. */
	eb->gc_node = nref;
	KASSERT(nref->nref_lnr == chmp->chm_gcblock->lnr);

	/* Check if node is in any chain. */
	if (!nref->nref_next) {
		/* This node is not in any chain. Simply collect it, or
		 * obsolete. */
		mutex_exit(&chmp->chm_lock_sizes);
		if (CHFS_REF_FLAGS(nref) == CHFS_PRISTINE_NODE_MASK) {
			chfs_gcollect_pristine(chmp, eb, NULL, nref);
		} else {
			chfs_mark_node_obsolete(chmp, nref);
		}
		goto lock_size;
	}

	mutex_exit(&chmp->chm_lock_sizes);

	mutex_enter(&chmp->chm_lock_vnocache);

	dbg_gc("nref lnr: %u - offset: %u\n", nref->nref_lnr, nref->nref_offset);
	vc = chfs_nref_to_vc(nref);

	/* Check the state of the node. */
	dbg_gc("switch\n");
	switch(vc->state) {
	case VNO_STATE_CHECKEDABSENT:
		if (CHFS_REF_FLAGS(nref) == CHFS_PRISTINE_NODE_MASK) {
			vc->state = VNO_STATE_GC;
		}
		break;

	case VNO_STATE_PRESENT:
		break;

	case VNO_STATE_UNCHECKED:
		/* FALLTHROUGH */
	case VNO_STATE_CHECKING:
		/* FALLTHROUGH */
	case VNO_STATE_GC:
		mutex_exit(&chmp->chm_lock_vnocache);
		panic("CHFS BUG - vc state unchecked,"
		    " checking or gc (vno #%llu, num #%d)\n",
		    (unsigned long long)vc->vno, vc->state);

	case VNO_STATE_READING:
		/* Node is in use at this time. */
		mutex_exit(&chmp->chm_lock_vnocache);
		kpause("chvncrea", true, mstohz(50), NULL);
		return 0;
	}

	if (vc->state == VNO_STATE_GC) {
		/* Pristine node of a checked-absent vnode: copy it verbatim
		 * without building the inode. */
		dbg_gc("vc->state == VNO_STATE_GC\n");
		vc->state = VNO_STATE_CHECKEDABSENT;
		mutex_exit(&chmp->chm_lock_vnocache);
		ret = chfs_gcollect_pristine(chmp, eb, NULL, nref);

		//TODO wake_up(&chmp->chm_vnocache_wq);
		if (ret != EBADF)
			goto test_gcnode;
		mutex_enter(&chmp->chm_lock_vnocache);
	}

	/* Collect living node.  Snapshot the identifiers before dropping
	 * the vnode cache lock. */
	vno = vc->vno;
	pvno = vc->pvno;
	nlink = vc->nlink;
	mutex_exit(&chmp->chm_lock_vnocache);

	ip = chfs_gc_fetch_inode(chmp, vno, !(pvno | nlink));

	if (!ip) {
		dbg_gc("!ip\n");
		ret = 0;
		goto lock_size;
	}

	/* NOTE(review): the return value of chfs_gcollect_live() is ignored;
	 * failure is detected indirectly by the test_gcnode check below. */
	chfs_gcollect_live(chmp, eb, nref, ip);

	chfs_gc_release_inode(chmp, ip);

test_gcnode:
	if (eb->dirty_size == gcblock_dirty &&
	    !CHFS_REF_OBSOLETE(eb->gc_node)) {
		/* Collection did not dirty anything and the node is still
		 * live: it failed. */
		dbg_gc("ERROR collecting node at %u failed.\n",
		    CHFS_GET_OFS(eb->gc_node->nref_offset));

		ret = ENOSPC;
	}

lock_size:
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	mutex_enter(&chmp->chm_lock_sizes);
eraseit:
	dbg_gc("eraseit\n");

	if (chmp->chm_gcblock) {
		/* This is only for debugging. */
		dbg_gc("eb used size = %u\n", chmp->chm_gcblock->used_size);
		dbg_gc("eb free size = %u\n", chmp->chm_gcblock->free_size);
		dbg_gc("eb dirty size = %u\n", chmp->chm_gcblock->dirty_size);
		dbg_gc("eb unchecked size = %u\n",
		    chmp->chm_gcblock->unchecked_size);
		dbg_gc("eb wasted size = %u\n", chmp->chm_gcblock->wasted_size);

		KASSERT(chmp->chm_gcblock->used_size + chmp->chm_gcblock->free_size +
		    chmp->chm_gcblock->dirty_size +
		    chmp->chm_gcblock->unchecked_size +
		    chmp->chm_gcblock->wasted_size == chmp->chm_ebh->eb_size);

	}

	/* Check the state of GC block: fully obsoleted blocks move to the
	 * erase-pending queue. */
	if (chmp->chm_gcblock && chmp->chm_gcblock->dirty_size +
	    chmp->chm_gcblock->wasted_size == chmp->chm_ebh->eb_size) {
		dbg_gc("Block at leb #%u completely obsoleted by GC, "
		    "Moving to erase_pending_queue\n", chmp->chm_gcblock->lnr);
		TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
		    chmp->chm_gcblock, queue);
		chmp->chm_gcblock = NULL;
		chmp->chm_nr_erasable_blocks++;
		if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
			ret = chfs_remap_leb(chmp);
		}
	}

	mutex_exit(&chmp->chm_lock_sizes);
	dbg_gc("return\n");
	return ret;
}
698 1.2.6.2 yamt
699 1.2.6.2 yamt
700 1.2.6.3 yamt /* chfs_gcollect_pristine - collects a pristine node */
701 1.2.6.2 yamt int
702 1.2.6.2 yamt chfs_gcollect_pristine(struct chfs_mount *chmp, struct chfs_eraseblock *cheb,
703 1.2.6.2 yamt struct chfs_vnode_cache *chvc, struct chfs_node_ref *nref)
704 1.2.6.2 yamt {
705 1.2.6.2 yamt struct chfs_node_ref *newnref;
706 1.2.6.2 yamt struct chfs_flash_node_hdr *nhdr;
707 1.2.6.2 yamt struct chfs_flash_vnode *fvnode;
708 1.2.6.2 yamt struct chfs_flash_dirent_node *fdirent;
709 1.2.6.2 yamt struct chfs_flash_data_node *fdata;
710 1.2.6.2 yamt int ret, retries = 0;
711 1.2.6.2 yamt uint32_t ofs, crc;
712 1.2.6.2 yamt size_t totlen = chfs_nref_len(chmp, cheb, nref);
713 1.2.6.2 yamt char *data;
714 1.2.6.2 yamt struct iovec vec;
715 1.2.6.2 yamt size_t retlen;
716 1.2.6.2 yamt
717 1.2.6.2 yamt dbg_gc("gcollect_pristine\n");
718 1.2.6.2 yamt
719 1.2.6.2 yamt data = kmem_alloc(totlen, KM_SLEEP);
720 1.2.6.2 yamt if (!data)
721 1.2.6.2 yamt return ENOMEM;
722 1.2.6.2 yamt
723 1.2.6.2 yamt ofs = CHFS_GET_OFS(nref->nref_offset);
724 1.2.6.2 yamt
725 1.2.6.3 yamt /* Read header. */
726 1.2.6.2 yamt ret = chfs_read_leb(chmp, nref->nref_lnr, data, ofs, totlen, &retlen);
727 1.2.6.2 yamt if (ret) {
728 1.2.6.2 yamt dbg_gc("reading error\n");
729 1.2.6.2 yamt return ret;
730 1.2.6.2 yamt }
731 1.2.6.2 yamt if (retlen != totlen) {
732 1.2.6.2 yamt dbg_gc("read size error\n");
733 1.2.6.2 yamt return EIO;
734 1.2.6.2 yamt }
735 1.2.6.2 yamt nhdr = (struct chfs_flash_node_hdr *)data;
736 1.2.6.3 yamt
737 1.2.6.3 yamt /* Check the header. */
738 1.2.6.2 yamt if (le16toh(nhdr->magic) != CHFS_FS_MAGIC_BITMASK) {
739 1.2.6.2 yamt dbg_gc("node header magic number error\n");
740 1.2.6.2 yamt return EBADF;
741 1.2.6.2 yamt }
742 1.2.6.2 yamt crc = crc32(0, (uint8_t *)nhdr, CHFS_NODE_HDR_SIZE - 4);
743 1.2.6.2 yamt if (crc != le32toh(nhdr->hdr_crc)) {
744 1.2.6.2 yamt dbg_gc("node header crc error\n");
745 1.2.6.2 yamt return EBADF;
746 1.2.6.2 yamt }
747 1.2.6.2 yamt
748 1.2.6.3 yamt /* Read the remaining parts. */
749 1.2.6.2 yamt switch(le16toh(nhdr->type)) {
750 1.2.6.2 yamt case CHFS_NODETYPE_VNODE:
751 1.2.6.3 yamt /* vnode information node */
752 1.2.6.3 yamt fvnode = (struct chfs_flash_vnode *)data;
753 1.2.6.2 yamt crc = crc32(0, (uint8_t *)fvnode, sizeof(struct chfs_flash_vnode) - 4);
754 1.2.6.2 yamt if (crc != le32toh(fvnode->node_crc)) {
755 1.2.6.3 yamt dbg_gc("vnode crc error\n");
756 1.2.6.3 yamt return EBADF;
757 1.2.6.3 yamt }
758 1.2.6.3 yamt break;
759 1.2.6.2 yamt case CHFS_NODETYPE_DIRENT:
760 1.2.6.3 yamt /* dirent node */
761 1.2.6.3 yamt fdirent = (struct chfs_flash_dirent_node *)data;
762 1.2.6.2 yamt crc = crc32(0, (uint8_t *)fdirent, sizeof(struct chfs_flash_dirent_node) - 4);
763 1.2.6.2 yamt if (crc != le32toh(fdirent->node_crc)) {
764 1.2.6.3 yamt dbg_gc("dirent crc error\n");
765 1.2.6.3 yamt return EBADF;
766 1.2.6.3 yamt }
767 1.2.6.2 yamt crc = crc32(0, fdirent->name, fdirent->nsize);
768 1.2.6.2 yamt if (crc != le32toh(fdirent->name_crc)) {
769 1.2.6.3 yamt dbg_gc("dirent name crc error\n");
770 1.2.6.3 yamt return EBADF;
771 1.2.6.3 yamt }
772 1.2.6.3 yamt break;
773 1.2.6.2 yamt case CHFS_NODETYPE_DATA:
774 1.2.6.3 yamt /* data node */
775 1.2.6.3 yamt fdata = (struct chfs_flash_data_node *)data;
776 1.2.6.2 yamt crc = crc32(0, (uint8_t *)fdata, sizeof(struct chfs_flash_data_node) - 4);
777 1.2.6.2 yamt if (crc != le32toh(fdata->node_crc)) {
778 1.2.6.3 yamt dbg_gc("data node crc error\n");
779 1.2.6.3 yamt return EBADF;
780 1.2.6.3 yamt }
781 1.2.6.3 yamt break;
782 1.2.6.2 yamt default:
783 1.2.6.3 yamt /* unknown node */
784 1.2.6.3 yamt if (chvc) {
785 1.2.6.3 yamt dbg_gc("unknown node have vnode cache\n");
786 1.2.6.3 yamt return EBADF;
787 1.2.6.3 yamt }
788 1.2.6.2 yamt }
789 1.2.6.2 yamt /* CRC's OK, write node to its new place */
790 1.2.6.2 yamt retry:
791 1.2.6.2 yamt ret = chfs_reserve_space_gc(chmp, totlen);
792 1.2.6.2 yamt if (ret)
793 1.2.6.2 yamt return ret;
794 1.2.6.2 yamt
795 1.2.6.2 yamt newnref = chfs_alloc_node_ref(chmp->chm_nextblock);
796 1.2.6.2 yamt if (!newnref)
797 1.2.6.2 yamt return ENOMEM;
798 1.2.6.2 yamt
799 1.2.6.2 yamt ofs = chmp->chm_ebh->eb_size - chmp->chm_nextblock->free_size;
800 1.2.6.2 yamt newnref->nref_offset = ofs;
801 1.2.6.2 yamt
802 1.2.6.3 yamt /* write out the whole node */
803 1.2.6.2 yamt vec.iov_base = (void *)data;
804 1.2.6.2 yamt vec.iov_len = totlen;
805 1.2.6.2 yamt mutex_enter(&chmp->chm_lock_sizes);
806 1.2.6.2 yamt ret = chfs_write_wbuf(chmp, &vec, 1, ofs, &retlen);
807 1.2.6.2 yamt
808 1.2.6.2 yamt if (ret || retlen != totlen) {
809 1.2.6.3 yamt /* error while writing */
810 1.2.6.2 yamt chfs_err("error while writing out to the media\n");
811 1.2.6.2 yamt chfs_err("err: %d | size: %zu | retlen : %zu\n",
812 1.2.6.2 yamt ret, totlen, retlen);
813 1.2.6.2 yamt
814 1.2.6.2 yamt chfs_change_size_dirty(chmp, chmp->chm_nextblock, totlen);
815 1.2.6.2 yamt if (retries) {
816 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
817 1.2.6.2 yamt return EIO;
818 1.2.6.2 yamt }
819 1.2.6.2 yamt
820 1.2.6.3 yamt /* try again */
821 1.2.6.2 yamt retries++;
822 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
823 1.2.6.2 yamt goto retry;
824 1.2.6.2 yamt }
825 1.2.6.2 yamt
826 1.2.6.3 yamt /* update vnode information */
827 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
828 1.2.6.2 yamt //TODO should we set free_size?
829 1.2.6.3 yamt mutex_enter(&chmp->chm_lock_vnocache);
830 1.2.6.2 yamt chfs_add_vnode_ref_to_vc(chmp, chvc, newnref);
831 1.2.6.3 yamt mutex_exit(&chmp->chm_lock_vnocache);
832 1.2.6.2 yamt return 0;
833 1.2.6.2 yamt }
834 1.2.6.2 yamt
835 1.2.6.2 yamt
836 1.2.6.3 yamt /* chfs_gcollect_live - collects a living node */
837 1.2.6.2 yamt int
838 1.2.6.2 yamt chfs_gcollect_live(struct chfs_mount *chmp,
839 1.2.6.2 yamt struct chfs_eraseblock *cheb, struct chfs_node_ref *nref,
840 1.2.6.2 yamt struct chfs_inode *ip)
841 1.2.6.2 yamt {
842 1.2.6.2 yamt struct chfs_node_frag *frag;
843 1.2.6.2 yamt struct chfs_full_dnode *fn = NULL;
844 1.2.6.2 yamt int start = 0, end = 0, nrfrags = 0;
845 1.2.6.2 yamt struct chfs_dirent *fd = NULL;
846 1.2.6.2 yamt int ret = 0;
847 1.2.6.2 yamt bool is_dirent;
848 1.2.6.2 yamt
849 1.2.6.2 yamt dbg_gc("gcollect_live\n");
850 1.2.6.2 yamt
851 1.2.6.2 yamt if (chmp->chm_gcblock != cheb) {
852 1.2.6.2 yamt dbg_gc("GC block is no longer gcblock. Restart.\n");
853 1.2.6.2 yamt goto upnout;
854 1.2.6.2 yamt }
855 1.2.6.2 yamt
856 1.2.6.2 yamt if (CHFS_REF_OBSOLETE(nref)) {
857 1.2.6.2 yamt dbg_gc("node to be GC'd was obsoleted in the meantime.\n");
858 1.2.6.2 yamt goto upnout;
859 1.2.6.2 yamt }
860 1.2.6.2 yamt
861 1.2.6.2 yamt /* It's a vnode? */
862 1.2.6.2 yamt if (ip->chvc->v == nref) {
863 1.2.6.2 yamt chfs_gcollect_vnode(chmp, ip);
864 1.2.6.2 yamt goto upnout;
865 1.2.6.2 yamt }
866 1.2.6.2 yamt
867 1.2.6.3 yamt /* Find data node. */
868 1.2.6.2 yamt dbg_gc("find full dnode\n");
869 1.2.6.2 yamt for(frag = frag_first(&ip->fragtree);
870 1.2.6.2 yamt frag; frag = frag_next(&ip->fragtree, frag)) {
871 1.2.6.2 yamt if (frag->node && frag->node->nref == nref) {
872 1.2.6.2 yamt fn = frag->node;
873 1.2.6.2 yamt end = frag->ofs + frag->size;
874 1.2.6.2 yamt if (!nrfrags++)
875 1.2.6.2 yamt start = frag->ofs;
876 1.2.6.2 yamt if (nrfrags == frag->node->frags)
877 1.2.6.2 yamt break;
878 1.2.6.2 yamt }
879 1.2.6.2 yamt }
880 1.2.6.2 yamt
881 1.2.6.2 yamt /* It's a pristine node, or dnode (or hole? XXX have we hole nodes?) */
882 1.2.6.2 yamt if (fn) {
883 1.2.6.2 yamt if (CHFS_REF_FLAGS(nref) == CHFS_PRISTINE_NODE_MASK) {
884 1.2.6.2 yamt ret = chfs_gcollect_pristine(chmp,
885 1.2.6.2 yamt cheb, ip->chvc, nref);
886 1.2.6.2 yamt if (!ret) {
887 1.2.6.2 yamt frag->node->nref = ip->chvc->v;
888 1.2.6.2 yamt }
889 1.2.6.2 yamt if (ret != EBADF)
890 1.2.6.2 yamt goto upnout;
891 1.2.6.2 yamt }
892 1.2.6.2 yamt ret = chfs_gcollect_dnode(chmp, cheb, ip, fn, start, end);
893 1.2.6.2 yamt goto upnout;
894 1.2.6.2 yamt }
895 1.2.6.2 yamt
896 1.2.6.3 yamt /* Is it a dirent? */
897 1.2.6.2 yamt dbg_gc("find full dirent\n");
898 1.2.6.2 yamt is_dirent = false;
899 1.2.6.2 yamt TAILQ_FOREACH(fd, &ip->dents, fds) {
900 1.2.6.2 yamt if (fd->nref == nref) {
901 1.2.6.2 yamt is_dirent = true;
902 1.2.6.2 yamt break;
903 1.2.6.2 yamt }
904 1.2.6.2 yamt }
905 1.2.6.2 yamt
906 1.2.6.2 yamt if (is_dirent && fd->vno) {
907 1.2.6.3 yamt /* Living dirent. */
908 1.2.6.2 yamt ret = chfs_gcollect_dirent(chmp, cheb, ip, fd);
909 1.2.6.2 yamt } else if (is_dirent) {
910 1.2.6.3 yamt /* Already deleted dirent. */
911 1.2.6.2 yamt ret = chfs_gcollect_deletion_dirent(chmp, cheb, ip, fd);
912 1.2.6.2 yamt } else {
913 1.2.6.2 yamt dbg_gc("Nref at leb #%u offset 0x%08x wasn't in node list"
914 1.2.6.2 yamt " for ino #%llu\n",
915 1.2.6.2 yamt nref->nref_lnr, CHFS_GET_OFS(nref->nref_offset),
916 1.2.6.2 yamt (unsigned long long)ip->ino);
917 1.2.6.2 yamt if (CHFS_REF_OBSOLETE(nref)) {
918 1.2.6.2 yamt dbg_gc("But it's obsolete so we don't mind"
919 1.2.6.2 yamt " too much.\n");
920 1.2.6.2 yamt }
921 1.2.6.2 yamt }
922 1.2.6.2 yamt
923 1.2.6.2 yamt upnout:
924 1.2.6.2 yamt return ret;
925 1.2.6.2 yamt }
926 1.2.6.2 yamt
927 1.2.6.3 yamt /* chfs_gcollect_vnode - collects a vnode information node */
928 1.2.6.2 yamt int
929 1.2.6.2 yamt chfs_gcollect_vnode(struct chfs_mount *chmp, struct chfs_inode *ip)
930 1.2.6.2 yamt {
931 1.2.6.2 yamt int ret;
932 1.2.6.2 yamt dbg_gc("gcollect_vnode\n");
933 1.2.6.2 yamt
934 1.2.6.3 yamt /* Simply write the new vnode information to the flash
935 1.2.6.3 yamt * with GC's space allocation */
936 1.2.6.2 yamt ret = chfs_write_flash_vnode(chmp, ip, ALLOC_GC);
937 1.2.6.2 yamt
938 1.2.6.2 yamt return ret;
939 1.2.6.2 yamt }
940 1.2.6.2 yamt
941 1.2.6.3 yamt /* chfs_gcollect_dirent - collects a dirent */
942 1.2.6.2 yamt int
943 1.2.6.2 yamt chfs_gcollect_dirent(struct chfs_mount *chmp,
944 1.2.6.2 yamt struct chfs_eraseblock *cheb, struct chfs_inode *parent,
945 1.2.6.2 yamt struct chfs_dirent *fd)
946 1.2.6.2 yamt {
947 1.2.6.2 yamt struct vnode *vnode = NULL;
948 1.2.6.2 yamt struct chfs_inode *ip;
949 1.2.6.2 yamt dbg_gc("gcollect_dirent\n");
950 1.2.6.2 yamt
951 1.2.6.3 yamt /* Find vnode. */
952 1.2.6.2 yamt vnode = chfs_vnode_lookup(chmp, fd->vno);
953 1.2.6.2 yamt
954 1.2.6.2 yamt /* XXX maybe KASSERT or panic on this? */
955 1.2.6.2 yamt if (vnode == NULL) {
956 1.2.6.2 yamt return ENOENT;
957 1.2.6.2 yamt }
958 1.2.6.2 yamt
959 1.2.6.2 yamt ip = VTOI(vnode);
960 1.2.6.2 yamt
961 1.2.6.3 yamt /* Remove and obsolete the previous version. */
962 1.2.6.3 yamt mutex_enter(&chmp->chm_lock_vnocache);
963 1.2.6.3 yamt chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
964 1.2.6.3 yamt &parent->chvc->dirents);
965 1.2.6.3 yamt mutex_exit(&chmp->chm_lock_vnocache);
966 1.2.6.2 yamt
967 1.2.6.3 yamt /* Write the new dirent to the flash. */
968 1.2.6.2 yamt return chfs_write_flash_dirent(chmp,
969 1.2.6.2 yamt parent, ip, fd, fd->vno, ALLOC_GC);
970 1.2.6.2 yamt }
971 1.2.6.2 yamt
/*
 * chfs_gcollect_deletion_dirent -
 * collects a dirent what was marked as deleted
 *
 * A deletion dirent only matters while an older, obsolete dirent with the
 * same name might still be replayed at mount time.  Scan the parent's
 * dirent nref chain for such an older node; if one exists, rewrite the
 * deletion dirent (keeping it alive), otherwise the deletion dirent itself
 * is no longer needed and is simply dropped from the in-memory list.
 */
int
chfs_gcollect_deletion_dirent(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, struct chfs_inode *parent,
    struct chfs_dirent *fd)
{
	struct chfs_flash_dirent_node chfdn;
	struct chfs_node_ref *nref;
	size_t retlen, name_len, nref_len;
	uint32_t name_crc;

	int ret;

	dbg_gc("gcollect_deletion_dirent\n");

	/* Precompute the name length/CRC used to match candidate nodes. */
	name_len = strlen(fd->name);
	name_crc = crc32(0, fd->name, name_len);

	nref_len = chfs_nref_len(chmp, cheb, fd->nref);

	/* Result is discarded; presumably done for the lookup's side effect
	 * (loading the vnode) — TODO confirm, see rev history. */
	(void)chfs_vnode_lookup(chmp, fd->vno);

	/* Walk the parent's dirent nref chain (terminated by the vnode
	 * cache pointer itself) looking for an older matching dirent. */
	for (nref = parent->chvc->dirents;
	     nref != (void*)parent->chvc;
	     nref = nref->nref_next) {

		/* Only obsolete nodes can be stale earlier versions. */
		if (!CHFS_REF_OBSOLETE(nref))
			continue;

		/* if node refs have different length, skip */
		if (chfs_nref_len(chmp, NULL, nref) != nref_len)
			continue;

		/* Same offset as the deletion dirent itself: skip it.
		 * NOTE(review): compares offsets only, not nref_lnr —
		 * assumes offsets are unique enough here; verify. */
		if (CHFS_GET_OFS(nref->nref_offset) ==
		    CHFS_GET_OFS(fd->nref->nref_offset)) {
			continue;
		}

		/* Read the candidate dirent node from flash. */
		ret = chfs_read_leb(chmp,
		    nref->nref_lnr, (void*)&chfdn, CHFS_GET_OFS(nref->nref_offset),
		    nref_len, &retlen);

		if (ret) {
			dbg_gc("Read error: %d\n", ret);
			continue;
		}

		if (retlen != nref_len) {
			dbg_gc("Error reading node:"
			    " read: %zu insted of: %zu\n", retlen, nref_len);
			continue;
		}

		/* if node type doesn't match, skip */
		if (le16toh(chfdn.type) != CHFS_NODETYPE_DIRENT)
			continue;

		/* if crc doesn't match, skip */
		if (le32toh(chfdn.name_crc) != name_crc)
			continue;

		/* if length of name different, or this is an another deletion
		 * dirent (vno == 0), skip
		 */
		if (chfdn.nsize != name_len || !le64toh(chfdn.vno))
			continue;

		/* check actual name */
		if (memcmp(chfdn.name, fd->name, name_len))
			continue;

		/* Found an older live dirent: the deletion dirent is still
		 * needed.  Obsolete its current on-flash copy and rewrite it
		 * (NULL target inode marks it as a deletion dirent). */
		mutex_enter(&chmp->chm_lock_vnocache);
		chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
		    &parent->chvc->dirents);
		mutex_exit(&chmp->chm_lock_vnocache);
		return chfs_write_flash_dirent(chmp,
		    parent, NULL, fd, fd->vno, ALLOC_GC);
	}

	/* No older version found: drop the deletion dirent entirely. */
	TAILQ_REMOVE(&parent->dents, fd, fds);
	chfs_free_dirent(fd);
	return 0;
}
1062 1.2.6.2 yamt
1063 1.2.6.3 yamt /* chfs_gcollect_dnode - */
1064 1.2.6.2 yamt int
1065 1.2.6.2 yamt chfs_gcollect_dnode(struct chfs_mount *chmp,
1066 1.2.6.2 yamt struct chfs_eraseblock *orig_cheb, struct chfs_inode *ip,
1067 1.2.6.2 yamt struct chfs_full_dnode *fn, uint32_t orig_start, uint32_t orig_end)
1068 1.2.6.2 yamt {
1069 1.2.6.3 yamt struct chfs_node_ref *nref;
1070 1.2.6.2 yamt struct chfs_full_dnode *newfn;
1071 1.2.6.2 yamt struct chfs_flash_data_node *fdnode;
1072 1.2.6.2 yamt int ret = 0, retries = 0;
1073 1.2.6.2 yamt uint32_t totlen;
1074 1.2.6.2 yamt char *data = NULL;
1075 1.2.6.2 yamt struct iovec vec;
1076 1.2.6.2 yamt size_t retlen;
1077 1.2.6.2 yamt dbg_gc("gcollect_dnode\n");
1078 1.2.6.2 yamt
1079 1.2.6.3 yamt //TODO merge frags
1080 1.2.6.2 yamt
1081 1.2.6.2 yamt KASSERT(orig_cheb->lnr == fn->nref->nref_lnr);
1082 1.2.6.2 yamt totlen = chfs_nref_len(chmp, orig_cheb, fn->nref);
1083 1.2.6.2 yamt data = kmem_alloc(totlen, KM_SLEEP);
1084 1.2.6.2 yamt
1085 1.2.6.3 yamt /* Read the node from the flash. */
1086 1.2.6.2 yamt ret = chfs_read_leb(chmp, fn->nref->nref_lnr, data, fn->nref->nref_offset,
1087 1.2.6.2 yamt totlen, &retlen);
1088 1.2.6.2 yamt
1089 1.2.6.2 yamt fdnode = (struct chfs_flash_data_node *)data;
1090 1.2.6.2 yamt fdnode->version = htole64(++ip->chvc->highest_version);
1091 1.2.6.2 yamt fdnode->node_crc = htole32(crc32(0, (uint8_t *)fdnode,
1092 1.2.6.2 yamt sizeof(*fdnode) - 4));
1093 1.2.6.2 yamt
1094 1.2.6.2 yamt vec.iov_base = (void *)data;
1095 1.2.6.2 yamt vec.iov_len = totlen;
1096 1.2.6.2 yamt
1097 1.2.6.2 yamt retry:
1098 1.2.6.3 yamt /* Set the next block where we can write. */
1099 1.2.6.2 yamt ret = chfs_reserve_space_gc(chmp, totlen);
1100 1.2.6.2 yamt if (ret)
1101 1.2.6.2 yamt goto out;
1102 1.2.6.2 yamt
1103 1.2.6.2 yamt nref = chfs_alloc_node_ref(chmp->chm_nextblock);
1104 1.2.6.2 yamt if (!nref) {
1105 1.2.6.2 yamt ret = ENOMEM;
1106 1.2.6.2 yamt goto out;
1107 1.2.6.2 yamt }
1108 1.2.6.2 yamt
1109 1.2.6.2 yamt mutex_enter(&chmp->chm_lock_sizes);
1110 1.2.6.2 yamt
1111 1.2.6.2 yamt nref->nref_offset = chmp->chm_ebh->eb_size - chmp->chm_nextblock->free_size;
1112 1.2.6.2 yamt KASSERT(nref->nref_offset % 4 == 0);
1113 1.2.6.2 yamt chfs_change_size_free(chmp, chmp->chm_nextblock, -totlen);
1114 1.2.6.2 yamt
1115 1.2.6.3 yamt /* Write it to the writebuffer. */
1116 1.2.6.2 yamt ret = chfs_write_wbuf(chmp, &vec, 1, nref->nref_offset, &retlen);
1117 1.2.6.2 yamt if (ret || retlen != totlen) {
1118 1.2.6.3 yamt /* error during writing */
1119 1.2.6.2 yamt chfs_err("error while writing out to the media\n");
1120 1.2.6.2 yamt chfs_err("err: %d | size: %d | retlen : %zu\n",
1121 1.2.6.2 yamt ret, totlen, retlen);
1122 1.2.6.2 yamt chfs_change_size_dirty(chmp, chmp->chm_nextblock, totlen);
1123 1.2.6.2 yamt if (retries) {
1124 1.2.6.2 yamt ret = EIO;
1125 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
1126 1.2.6.2 yamt goto out;
1127 1.2.6.2 yamt }
1128 1.2.6.2 yamt
1129 1.2.6.3 yamt /* try again */
1130 1.2.6.2 yamt retries++;
1131 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
1132 1.2.6.2 yamt goto retry;
1133 1.2.6.2 yamt }
1134 1.2.6.2 yamt
1135 1.2.6.2 yamt dbg_gc("new nref lnr: %u - offset: %u\n", nref->nref_lnr, nref->nref_offset);
1136 1.2.6.2 yamt
1137 1.2.6.2 yamt chfs_change_size_used(chmp, &chmp->chm_blocks[nref->nref_lnr], totlen);
1138 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
1139 1.2.6.2 yamt KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);
1140 1.2.6.2 yamt
1141 1.2.6.3 yamt /* Set fields of the new node. */
1142 1.2.6.2 yamt newfn = chfs_alloc_full_dnode();
1143 1.2.6.2 yamt newfn->nref = nref;
1144 1.2.6.2 yamt newfn->ofs = fn->ofs;
1145 1.2.6.2 yamt newfn->size = fn->size;
1146 1.2.6.3 yamt newfn->frags = 0;
1147 1.2.6.2 yamt
1148 1.2.6.3 yamt mutex_enter(&chmp->chm_lock_vnocache);
1149 1.2.6.3 yamt /* Remove every part of the old node. */
1150 1.2.6.3 yamt chfs_remove_frags_of_node(chmp, &ip->fragtree, fn->nref);
1151 1.2.6.3 yamt chfs_remove_and_obsolete(chmp, ip->chvc, fn->nref, &ip->chvc->dnode);
1152 1.2.6.2 yamt
1153 1.2.6.3 yamt /* Add the new nref to inode. */
1154 1.2.6.2 yamt chfs_add_full_dnode_to_inode(chmp, ip, newfn);
1155 1.2.6.2 yamt chfs_add_node_to_list(chmp,
1156 1.2.6.2 yamt ip->chvc, newfn->nref, &ip->chvc->dnode);
1157 1.2.6.3 yamt mutex_exit(&chmp->chm_lock_vnocache);
1158 1.2.6.2 yamt
1159 1.2.6.2 yamt out:
1160 1.2.6.2 yamt kmem_free(data, totlen);
1161 1.2.6.2 yamt return ret;
1162 1.2.6.2 yamt }
1163