chfs_gc.c revision 1.2.6.2 1 1.2.6.2 yamt /* $NetBSD: chfs_gc.c,v 1.2.6.2 2012/04/17 00:08:54 yamt Exp $ */
2 1.2.6.2 yamt
3 1.2.6.2 yamt /*-
4 1.2.6.2 yamt * Copyright (c) 2010 Department of Software Engineering,
5 1.2.6.2 yamt * University of Szeged, Hungary
6 1.2.6.2 yamt * Copyright (c) 2010 Tamas Toth <ttoth (at) inf.u-szeged.hu>
7 1.2.6.2 yamt * Copyright (c) 2010 Adam Hoka <ahoka (at) NetBSD.org>
8 1.2.6.2 yamt * All rights reserved.
9 1.2.6.2 yamt *
10 1.2.6.2 yamt * This code is derived from software contributed to The NetBSD Foundation
11 1.2.6.2 yamt * by the Department of Software Engineering, University of Szeged, Hungary
12 1.2.6.2 yamt *
13 1.2.6.2 yamt * Redistribution and use in source and binary forms, with or without
14 1.2.6.2 yamt * modification, are permitted provided that the following conditions
15 1.2.6.2 yamt * are met:
16 1.2.6.2 yamt * 1. Redistributions of source code must retain the above copyright
17 1.2.6.2 yamt * notice, this list of conditions and the following disclaimer.
18 1.2.6.2 yamt * 2. Redistributions in binary form must reproduce the above copyright
19 1.2.6.2 yamt * notice, this list of conditions and the following disclaimer in the
20 1.2.6.2 yamt * documentation and/or other materials provided with the distribution.
21 1.2.6.2 yamt *
22 1.2.6.2 yamt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 1.2.6.2 yamt * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 1.2.6.2 yamt * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 1.2.6.2 yamt * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 1.2.6.2 yamt * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 1.2.6.2 yamt * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 1.2.6.2 yamt * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 1.2.6.2 yamt * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 1.2.6.2 yamt * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 1.2.6.2 yamt * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 1.2.6.2 yamt * SUCH DAMAGE.
33 1.2.6.2 yamt */
34 1.2.6.2 yamt
35 1.2.6.2 yamt #include "chfs.h"
36 1.2.6.2 yamt
37 1.2.6.2 yamt void chfs_gc_release_inode(struct chfs_mount *,
38 1.2.6.2 yamt struct chfs_inode *);
39 1.2.6.2 yamt struct chfs_inode *chfs_gc_fetch_inode(struct chfs_mount *,
40 1.2.6.2 yamt ino_t, uint32_t);
41 1.2.6.2 yamt int chfs_check(struct chfs_mount *, struct chfs_vnode_cache *);
42 1.2.6.2 yamt void chfs_clear_inode(struct chfs_mount *, struct chfs_inode *);
43 1.2.6.2 yamt
44 1.2.6.2 yamt
45 1.2.6.2 yamt struct chfs_eraseblock *find_gc_block(struct chfs_mount *);
46 1.2.6.2 yamt int chfs_gcollect_pristine(struct chfs_mount *,
47 1.2.6.2 yamt struct chfs_eraseblock *,
48 1.2.6.2 yamt struct chfs_vnode_cache *, struct chfs_node_ref *);
49 1.2.6.2 yamt int chfs_gcollect_live(struct chfs_mount *,
50 1.2.6.2 yamt struct chfs_eraseblock *, struct chfs_node_ref *,
51 1.2.6.2 yamt struct chfs_inode *);
52 1.2.6.2 yamt int chfs_gcollect_vnode(struct chfs_mount *, struct chfs_inode *);
53 1.2.6.2 yamt int chfs_gcollect_dirent(struct chfs_mount *,
54 1.2.6.2 yamt struct chfs_eraseblock *, struct chfs_inode *,
55 1.2.6.2 yamt struct chfs_dirent *);
56 1.2.6.2 yamt int chfs_gcollect_deletion_dirent(struct chfs_mount *,
57 1.2.6.2 yamt struct chfs_eraseblock *, struct chfs_inode *,
58 1.2.6.2 yamt struct chfs_dirent *);
59 1.2.6.2 yamt int chfs_gcollect_dnode(struct chfs_mount *,
60 1.2.6.2 yamt struct chfs_eraseblock *, struct chfs_inode *,
61 1.2.6.2 yamt struct chfs_full_dnode *, uint32_t, uint32_t);
62 1.2.6.2 yamt
63 1.2.6.2 yamt /* must be called with chm_lock_mountfields held */
64 1.2.6.2 yamt void
65 1.2.6.2 yamt chfs_gc_trigger(struct chfs_mount *chmp)
66 1.2.6.2 yamt {
67 1.2.6.2 yamt struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
68 1.2.6.2 yamt
69 1.2.6.2 yamt //mutex_enter(&chmp->chm_lock_sizes);
70 1.2.6.2 yamt if (gc->gcth_running &&
71 1.2.6.2 yamt chfs_gc_thread_should_wake(chmp)) {
72 1.2.6.2 yamt cv_signal(&gc->gcth_wakeup);
73 1.2.6.2 yamt }
74 1.2.6.2 yamt //mutex_exit(&chmp->chm_lock_sizes);
75 1.2.6.2 yamt }
76 1.2.6.2 yamt
77 1.2.6.2 yamt
/*
 * chfs_gc_thread - main loop of the garbage collector kernel thread.
 * Runs GC passes whenever chfs_gc_thread_should_wake() reports work,
 * otherwise sleeps on the wakeup condvar (with a 100 ms timeout so the
 * wake condition is re-evaluated periodically).  Exits when
 * gcth_running is cleared by chfs_gc_thread_stop().
 */
void
chfs_gc_thread(void *data)
{
	struct chfs_mount *chmp = data;
	struct garbage_collector_thread *gc = &chmp->chm_gc_thread;

	dbg_gc("[GC THREAD] thread started\n");

	mutex_enter(&chmp->chm_lock_mountfields);
	while (gc->gcth_running) {
		/* we must call chfs_gc_thread_should_wake with
		 * chm_lock_mountfields held, which is a bit awkwardly done
		 * here, but we can't really do it any other way with the
		 * current design...
		 */
		if (chfs_gc_thread_should_wake(chmp)) {
			// mutex_exit(&chmp->chm_lock_mountfields);
			if (chfs_gcollect_pass(chmp) == ENOSPC) {
				dbg_gc("No space for garbage collection\n");
				panic("No space for garbage collection\n");
				/* XXX why break here? i have added a panic
				 * here to see if it gets triggered -ahoka
				 */
				break;
			}
			/* XXX gcollect_pass drops the mutex, so reacquire
			 * it before looping / sleeping again */
			mutex_enter(&chmp->chm_lock_mountfields);
		}

		/* Sleep until signalled or timeout; the timeout keeps the
		 * wake condition polled even without explicit triggers. */
		cv_timedwait_sig(&gc->gcth_wakeup,
		    &chmp->chm_lock_mountfields, mstohz(100));
	}
	mutex_exit(&chmp->chm_lock_mountfields);

	dbg_gc("[GC THREAD] thread stopped\n");
	kthread_exit(0);
}
114 1.2.6.2 yamt
115 1.2.6.2 yamt void
116 1.2.6.2 yamt chfs_gc_thread_start(struct chfs_mount *chmp)
117 1.2.6.2 yamt {
118 1.2.6.2 yamt struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
119 1.2.6.2 yamt
120 1.2.6.2 yamt cv_init(&gc->gcth_wakeup, "chfsgccv");
121 1.2.6.2 yamt
122 1.2.6.2 yamt gc->gcth_running = true;
123 1.2.6.2 yamt kthread_create(PRI_NONE, /*KTHREAD_MPSAFE |*/ KTHREAD_MUSTJOIN,
124 1.2.6.2 yamt NULL, chfs_gc_thread, chmp, &gc->gcth_thread,
125 1.2.6.2 yamt "chfsgcth");
126 1.2.6.2 yamt }
127 1.2.6.2 yamt
128 1.2.6.2 yamt void
129 1.2.6.2 yamt chfs_gc_thread_stop(struct chfs_mount *chmp)
130 1.2.6.2 yamt {
131 1.2.6.2 yamt struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
132 1.2.6.2 yamt
133 1.2.6.2 yamt /* check if it is actually running. if not, do nothing */
134 1.2.6.2 yamt if (gc->gcth_running) {
135 1.2.6.2 yamt gc->gcth_running = false;
136 1.2.6.2 yamt } else {
137 1.2.6.2 yamt return;
138 1.2.6.2 yamt }
139 1.2.6.2 yamt cv_signal(&gc->gcth_wakeup);
140 1.2.6.2 yamt dbg_gc("[GC THREAD] stop signal sent\n");
141 1.2.6.2 yamt
142 1.2.6.2 yamt kthread_join(gc->gcth_thread);
143 1.2.6.2 yamt #ifdef BROKEN_KTH_JOIN
144 1.2.6.2 yamt kpause("chfsthjoin", false, mstohz(1000), NULL);
145 1.2.6.2 yamt #endif
146 1.2.6.2 yamt
147 1.2.6.2 yamt cv_destroy(&gc->gcth_wakeup);
148 1.2.6.2 yamt }
149 1.2.6.2 yamt
150 1.2.6.2 yamt /* must be called with chm_lock_mountfields held */
151 1.2.6.2 yamt int
152 1.2.6.2 yamt chfs_gc_thread_should_wake(struct chfs_mount *chmp)
153 1.2.6.2 yamt {
154 1.2.6.2 yamt int nr_very_dirty = 0;
155 1.2.6.2 yamt struct chfs_eraseblock *cheb;
156 1.2.6.2 yamt uint32_t dirty;
157 1.2.6.2 yamt
158 1.2.6.2 yamt KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
159 1.2.6.2 yamt
160 1.2.6.2 yamt if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
161 1.2.6.2 yamt dbg_gc("erase_pending\n");
162 1.2.6.2 yamt return 1;
163 1.2.6.2 yamt }
164 1.2.6.2 yamt
165 1.2.6.2 yamt if (chmp->chm_unchecked_size) {
166 1.2.6.2 yamt dbg_gc("unchecked\n");
167 1.2.6.2 yamt return 1;
168 1.2.6.2 yamt }
169 1.2.6.2 yamt
170 1.2.6.2 yamt dirty = chmp->chm_dirty_size - chmp->chm_nr_erasable_blocks *
171 1.2.6.2 yamt chmp->chm_ebh->eb_size;
172 1.2.6.2 yamt
173 1.2.6.2 yamt if (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks <
174 1.2.6.2 yamt chmp->chm_resv_blocks_gctrigger && (dirty > chmp->chm_nospc_dirty)) {
175 1.2.6.2 yamt dbg_gc("free: %d + erasable: %d < resv: %d\n",
176 1.2.6.2 yamt chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks,
177 1.2.6.2 yamt chmp->chm_resv_blocks_gctrigger);
178 1.2.6.2 yamt dbg_gc("dirty: %d > nospc_dirty: %d\n",
179 1.2.6.2 yamt dirty, chmp->chm_nospc_dirty);
180 1.2.6.2 yamt
181 1.2.6.2 yamt return 1;
182 1.2.6.2 yamt }
183 1.2.6.2 yamt
184 1.2.6.2 yamt TAILQ_FOREACH(cheb, &chmp->chm_very_dirty_queue, queue) {
185 1.2.6.2 yamt nr_very_dirty++;
186 1.2.6.2 yamt if (nr_very_dirty == chmp->chm_vdirty_blocks_gctrigger) {
187 1.2.6.2 yamt dbg_gc("nr_very_dirty\n");
188 1.2.6.2 yamt return 1;
189 1.2.6.2 yamt }
190 1.2.6.2 yamt }
191 1.2.6.2 yamt
192 1.2.6.2 yamt return 0;
193 1.2.6.2 yamt }
194 1.2.6.2 yamt
/*
 * chfs_gc_release_inode - release an inode pinned by the GC.
 * Currently only logs; the inode-lock / vput handling it would undo is
 * disabled in chfs_gc_fetch_inode() as well, so this is a matching
 * placeholder.
 */
void
chfs_gc_release_inode(struct chfs_mount *chmp,
    struct chfs_inode *ip)
{
	dbg_gc("release inode\n");
}
203 1.2.6.2 yamt
204 1.2.6.2 yamt struct chfs_inode *
205 1.2.6.2 yamt chfs_gc_fetch_inode(struct chfs_mount *chmp, ino_t vno,
206 1.2.6.2 yamt uint32_t unlinked)
207 1.2.6.2 yamt {
208 1.2.6.2 yamt struct vnode *vp = NULL;
209 1.2.6.2 yamt struct chfs_vnode_cache *vc;
210 1.2.6.2 yamt struct chfs_inode *ip;
211 1.2.6.2 yamt dbg_gc("fetch inode %llu\n", (unsigned long long)vno);
212 1.2.6.2 yamt
213 1.2.6.2 yamt if (unlinked) {
214 1.2.6.2 yamt dbg_gc("unlinked\n");
215 1.2.6.2 yamt vp = chfs_vnode_lookup(chmp, vno);
216 1.2.6.2 yamt if (!vp) {
217 1.2.6.2 yamt mutex_enter(&chmp->chm_lock_vnocache);
218 1.2.6.2 yamt vc = chfs_vnode_cache_get(chmp, vno);
219 1.2.6.2 yamt if (!vc) {
220 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_vnocache);
221 1.2.6.2 yamt return NULL;
222 1.2.6.2 yamt }
223 1.2.6.2 yamt if (vc->state != VNO_STATE_CHECKEDABSENT) {
224 1.2.6.2 yamt //sleep_on_spinunlock(&chmp->chm_lock_vnocache);
225 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_vnocache);
226 1.2.6.2 yamt /* XXX why do we need the delay here?! */
227 1.2.6.2 yamt // kpause("chvncabs", true, mstohz(50), NULL);
228 1.2.6.2 yamt KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
229 1.2.6.2 yamt cv_timedwait_sig(
230 1.2.6.2 yamt &chmp->chm_gc_thread.gcth_wakeup,
231 1.2.6.2 yamt &chmp->chm_lock_mountfields, mstohz(50));
232 1.2.6.2 yamt
233 1.2.6.2 yamt // KASSERT(!mutex_owned(&chmp->chm_lock_vnocache));
234 1.2.6.2 yamt } else {
235 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_vnocache);
236 1.2.6.2 yamt }
237 1.2.6.2 yamt return NULL;
238 1.2.6.2 yamt }
239 1.2.6.2 yamt } else {
240 1.2.6.2 yamt dbg_gc("vnode lookup\n");
241 1.2.6.2 yamt vp = chfs_vnode_lookup(chmp, vno);
242 1.2.6.2 yamt //VFS_VGET(chmp->chm_fsmp, vno, &vp);
243 1.2.6.2 yamt }
244 1.2.6.2 yamt dbg_gc("vp to ip\n");
245 1.2.6.2 yamt ip = VTOI(vp);
246 1.2.6.2 yamt KASSERT(ip);
247 1.2.6.2 yamt //mutex_enter(&ip->inode_lock);
248 1.2.6.2 yamt
249 1.2.6.2 yamt return ip;
250 1.2.6.2 yamt }
251 1.2.6.2 yamt
252 1.2.6.2 yamt extern rb_tree_ops_t frag_rbtree_ops;
253 1.2.6.2 yamt
254 1.2.6.2 yamt int
255 1.2.6.2 yamt chfs_check(struct chfs_mount *chmp, struct chfs_vnode_cache *chvc)
256 1.2.6.2 yamt {
257 1.2.6.2 yamt struct chfs_inode *ip;
258 1.2.6.2 yamt struct vnode *vp;
259 1.2.6.2 yamt int ret;
260 1.2.6.2 yamt
261 1.2.6.2 yamt ip = pool_get(&chfs_inode_pool, PR_WAITOK);
262 1.2.6.2 yamt if (!ip) {
263 1.2.6.2 yamt return ENOMEM;
264 1.2.6.2 yamt }
265 1.2.6.2 yamt
266 1.2.6.2 yamt vp = kmem_zalloc(sizeof(struct vnode), KM_SLEEP);
267 1.2.6.2 yamt
268 1.2.6.2 yamt ip->chvc = chvc;
269 1.2.6.2 yamt ip->vp = vp;
270 1.2.6.2 yamt
271 1.2.6.2 yamt vp->v_data = ip;
272 1.2.6.2 yamt
273 1.2.6.2 yamt rb_tree_init(&ip->fragtree, &frag_rbtree_ops);
274 1.2.6.2 yamt TAILQ_INIT(&ip->dents);
275 1.2.6.2 yamt
276 1.2.6.2 yamt ret = chfs_read_inode_internal(chmp, ip);
277 1.2.6.2 yamt if (!ret) {
278 1.2.6.2 yamt chfs_clear_inode(chmp, ip);
279 1.2.6.2 yamt }
280 1.2.6.2 yamt
281 1.2.6.2 yamt pool_put(&chfs_inode_pool, ip);
282 1.2.6.2 yamt
283 1.2.6.2 yamt return ret;
284 1.2.6.2 yamt }
285 1.2.6.2 yamt
286 1.2.6.2 yamt void
287 1.2.6.2 yamt chfs_clear_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
288 1.2.6.2 yamt {
289 1.2.6.2 yamt struct chfs_dirent *fd, *tmpfd;
290 1.2.6.2 yamt struct chfs_vnode_cache *chvc;
291 1.2.6.2 yamt
292 1.2.6.2 yamt
293 1.2.6.2 yamt /* XXX not sure if this is the correct locking */
294 1.2.6.2 yamt // mutex_enter(&chmp->chm_lock_vnocache);
295 1.2.6.2 yamt chvc = ip->chvc;
296 1.2.6.2 yamt /* shouldnt this be: */
297 1.2.6.2 yamt //bool deleted = (chvc && !(chvc->pvno || chvc->nlink));
298 1.2.6.2 yamt int deleted = (chvc && !(chvc->pvno | chvc->nlink));
299 1.2.6.2 yamt
300 1.2.6.2 yamt if (chvc && chvc->state != VNO_STATE_CHECKING) {
301 1.2.6.2 yamt // chfs_vnode_cache_state_set(chmp, chvc, VNO_STATE_CLEARING);
302 1.2.6.2 yamt chvc->state = VNO_STATE_CLEARING;
303 1.2.6.2 yamt }
304 1.2.6.2 yamt
305 1.2.6.2 yamt if (chvc->v && ((struct chfs_vnode_cache *)chvc->v != chvc)) {
306 1.2.6.2 yamt if (deleted)
307 1.2.6.2 yamt chfs_mark_node_obsolete(chmp, chvc->v);
308 1.2.6.2 yamt //chfs_free_refblock(chvc->v);
309 1.2.6.2 yamt }
310 1.2.6.2 yamt // mutex_enter(&chmp->chm_lock_vnocache);
311 1.2.6.2 yamt
312 1.2.6.2 yamt chfs_kill_fragtree(&ip->fragtree);
313 1.2.6.2 yamt /*
314 1.2.6.2 yamt fd = TAILQ_FIRST(&ip->dents);
315 1.2.6.2 yamt while (fd) {
316 1.2.6.2 yamt TAILQ_REMOVE(&ip->dents, fd, fds);
317 1.2.6.2 yamt chfs_free_dirent(fd);
318 1.2.6.2 yamt fd = TAILQ_FIRST(&ip->dents);
319 1.2.6.2 yamt }
320 1.2.6.2 yamt */
321 1.2.6.2 yamt
322 1.2.6.2 yamt TAILQ_FOREACH_SAFE(fd, &ip->dents, fds, tmpfd) {
323 1.2.6.2 yamt chfs_free_dirent(fd);
324 1.2.6.2 yamt }
325 1.2.6.2 yamt
326 1.2.6.2 yamt if (chvc && chvc->state == VNO_STATE_CHECKING) {
327 1.2.6.2 yamt chfs_vnode_cache_set_state(chmp,
328 1.2.6.2 yamt chvc, VNO_STATE_CHECKEDABSENT);
329 1.2.6.2 yamt if ((struct chfs_vnode_cache *)chvc->v == chvc &&
330 1.2.6.2 yamt (struct chfs_vnode_cache *)chvc->dirents == chvc &&
331 1.2.6.2 yamt (struct chfs_vnode_cache *)chvc->dnode == chvc)
332 1.2.6.2 yamt chfs_vnode_cache_remove(chmp, chvc);
333 1.2.6.2 yamt }
334 1.2.6.2 yamt
335 1.2.6.2 yamt }
336 1.2.6.2 yamt
/*
 * find_gc_block - select the next eraseblock to garbage collect.
 *
 * Picks a queue semi-randomly (weighted by tv_nsec % 128 so that
 * erase-pending and very-dirty blocks are strongly preferred), falls
 * back through the remaining queues, and as a last resort flushes the
 * write buffer to make erasable_pending_wbuf blocks reusable.  The
 * chosen block is removed from its queue, becomes chm_gcblock, and its
 * gc_node scan cursor is reset to the first node.  Returns NULL when
 * no queue has anything to offer.
 *
 * Must be called with chm_lock_mountfields held.
 */
struct chfs_eraseblock *
find_gc_block(struct chfs_mount *chmp)
{
	struct chfs_eraseblock *ret;
	struct chfs_eraseblock_queue *nextqueue;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	struct timespec now;
	vfs_timestamp(&now);

	/* pseudo-random weight in [0, 127] from the clock's nanoseconds */
	int n = now.tv_nsec % 128;

again:
/* if (!TAILQ_EMPTY(&chmp->chm_bad_used_queue) && chmp->chm_nr_free_blocks > chmp->chm_nr_resv_blocks_gcbad) {
		dbg_gc("Picking block from bad_used_queue to GC next\n");
		nextqueue = &chmp->chm_bad_used_queue;
	} else */if (n<50 && !TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
		/* ~39% chance: prefer blocks already pending erase */
		dbg_gc("Picking block from erase_pending_queue to GC next\n");
		nextqueue = &chmp->chm_erase_pending_queue;
	} else if (n<110 && !TAILQ_EMPTY(&chmp->chm_very_dirty_queue) ) {
		/* ~47% chance: very dirty blocks reclaim the most space */
		dbg_gc("Picking block from very_dirty_queue to GC next\n");
		nextqueue = &chmp->chm_very_dirty_queue;
	} else if (n<126 && !TAILQ_EMPTY(&chmp->chm_dirty_queue) ) {
		dbg_gc("Picking block from dirty_queue to GC next\n");
		nextqueue = &chmp->chm_dirty_queue;
	} else if (!TAILQ_EMPTY(&chmp->chm_clean_queue)) {
		/* occasionally GC a clean block, which also levels wear */
		dbg_gc("Picking block from clean_queue to GC next\n");
		nextqueue = &chmp->chm_clean_queue;
	} else if (!TAILQ_EMPTY(&chmp->chm_dirty_queue)) {
		dbg_gc("Picking block from dirty_queue to GC next"
		    " (clean_queue was empty)\n");
		nextqueue = &chmp->chm_dirty_queue;
	} else if (!TAILQ_EMPTY(&chmp->chm_very_dirty_queue)) {
		dbg_gc("Picking block from very_dirty_queue to GC next"
		    " (clean_queue and dirty_queue were empty)\n");
		nextqueue = &chmp->chm_very_dirty_queue;
	} else if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
		dbg_gc("Picking block from erase_pending_queue to GC next"
		    " (clean_queue and {very_,}dirty_queue were empty)\n");
		nextqueue = &chmp->chm_erase_pending_queue;
	} else if (!TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue)) {
		/* nothing directly collectable: flush the write buffer so
		 * those blocks become erasable, then try again */
		dbg_gc("Synching wbuf in order to reuse "
		    "erasable_pendig_wbuf_queue blocks\n");
		rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
		chfs_flush_pending_wbuf(chmp);
		rw_exit(&chmp->chm_lock_wbuf);
		goto again;
	} else {
		dbg_gc("CHFS: no clean, dirty _or_ erasable"
		    " blocks to GC from! Where are they all?\n");
		return NULL;
	}

	ret = TAILQ_FIRST(nextqueue);
	if (chmp->chm_nextblock) {
		dbg_gc("nextblock num: %u - gcblock num: %u\n",
		    chmp->chm_nextblock->lnr, ret->lnr);
		/* never GC the block currently being written to */
		if (ret == chmp->chm_nextblock)
			goto again;
	}
	TAILQ_REMOVE(nextqueue, ret, queue);
	chmp->chm_gcblock = ret;
	/* reset the per-block GC scan cursor */
	ret->gc_node = ret->first_node;

	if (!ret->gc_node) {
		dbg_gc("Oops! ret->gc_node at LEB: %u is NULL\n", ret->lnr);
		panic("CHFS BUG - one LEB's gc_node is NULL\n");
	}

	/* TODO wasted size? */
	return ret;
}
414 1.2.6.2 yamt
415 1.2.6.2 yamt
/*
 * chfs_gcollect_pass - perform one garbage collection step.
 *
 * Phase 1: while unchecked node space remains, check one vnode cache
 * entry per call via chfs_check() and return.  Phase 2: pick (or keep)
 * a GC block, advance its gc_node cursor past obsolete nodes, and
 * collect the first live node - pristine nodes are copied verbatim,
 * anything else goes through chfs_gcollect_live() on the owning inode.
 * Finally, if the GC block became fully dirty, move it to the
 * erase-pending queue.
 *
 * Returns 0 on success, ENOSPC when collection failed or the vnode
 * space is exhausted, EAGAIN/EIO when no GC block could be found.
 *
 * Must be entered with chm_lock_mountfields held; the lock is DROPPED
 * before returning (callers must re-acquire it).
 */
int
chfs_gcollect_pass(struct chfs_mount *chmp)
{
	struct chfs_vnode_cache *vc;
	struct chfs_eraseblock *eb;
	struct chfs_node_ref *nref;
	uint32_t gcblock_dirty;
	struct chfs_inode *ip;
	ino_t vno, pvno;
	uint32_t nlink;
	int ret = 0;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	/* Phase 1: process unchecked vnodes, one per pass. */
	for (;;) {
		mutex_enter(&chmp->chm_lock_sizes);

		dbg_gc("unchecked size == %u\n", chmp->chm_unchecked_size);
		if (!chmp->chm_unchecked_size)
			break;

		/* scanned past the last vnode but unchecked space
		 * remains: something is inconsistent */
		if (chmp->chm_checked_vno > chmp->chm_max_vno) {
			mutex_exit(&chmp->chm_lock_sizes);
			mutex_exit(&chmp->chm_lock_mountfields);
			dbg_gc("checked_vno (#%llu) > max_vno (#%llu)\n",
			    (unsigned long long)chmp->chm_checked_vno,
			    (unsigned long long)chmp->chm_max_vno);
			return ENOSPC;
		}

		mutex_exit(&chmp->chm_lock_sizes);

		mutex_enter(&chmp->chm_lock_vnocache);
		dbg_gc("checking vno #%llu\n",
		    (unsigned long long)chmp->chm_checked_vno);
		dbg_gc("get vnode cache\n");
		vc = chfs_vnode_cache_get(chmp, chmp->chm_checked_vno++);

		if (!vc) {
			/* no cache entry for this vno, try the next one */
			dbg_gc("!vc\n");
			mutex_exit(&chmp->chm_lock_vnocache);
			continue;
		}

		if ((vc->pvno | vc->nlink) == 0) {
			/* deleted inode, nothing to check */
			dbg_gc("(pvno | nlink) == 0\n");
			mutex_exit(&chmp->chm_lock_vnocache);
			continue;
		}

		dbg_gc("switch\n");
		switch (vc->state) {
		case VNO_STATE_CHECKEDABSENT:
		case VNO_STATE_PRESENT:
			/* already checked */
			mutex_exit(&chmp->chm_lock_vnocache);
			continue;

		case VNO_STATE_GC:
		case VNO_STATE_CHECKING:
			/* should be impossible while we hold the locks */
			mutex_exit(&chmp->chm_lock_vnocache);
			mutex_exit(&chmp->chm_lock_mountfields);
			dbg_gc("VNO_STATE GC or CHECKING\n");
			panic("CHFS BUG - vc state gc or checking\n");

		case VNO_STATE_READING:
			/* someone is reading it in; back off, undo the
			 * vno increment, and retry later */
			chmp->chm_checked_vno--;
			mutex_exit(&chmp->chm_lock_vnocache);
			/* XXX why do we need the delay here?! */
			kpause("chvncrea", true, mstohz(50), NULL);

			mutex_exit(&chmp->chm_lock_mountfields);
			return 0;

		default:
			mutex_exit(&chmp->chm_lock_vnocache);
			mutex_exit(&chmp->chm_lock_mountfields);
			dbg_gc("default\n");
			panic("CHFS BUG - vc state is other what we"
			    " checked\n");

		case VNO_STATE_UNCHECKED:
			/* fall through to the actual check below */
			;
		}

		chfs_vnode_cache_set_state(chmp, vc, VNO_STATE_CHECKING);

		/* XXX check if this is too heavy to call under
		 * chm_lock_vnocache
		 */
		ret = chfs_check(chmp, vc);
		dbg_gc("set state\n");
		chfs_vnode_cache_set_state(chmp,
		    vc, VNO_STATE_CHECKEDABSENT);

		mutex_exit(&chmp->chm_lock_vnocache);
		mutex_exit(&chmp->chm_lock_mountfields);

		return ret;
	}

	/* Phase 2: collect from the current (or a fresh) GC block.
	 * Note: chm_lock_sizes is still held here from the loop above. */
	eb = chmp->chm_gcblock;

	if (!eb) {
		eb = find_gc_block(chmp);
	}

	if (!eb) {
		dbg_gc("!eb\n");
		/* blocks are pending erase: worth retrying soon */
		if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
			mutex_exit(&chmp->chm_lock_sizes);
			mutex_exit(&chmp->chm_lock_mountfields);
			return EAGAIN;
		}
		mutex_exit(&chmp->chm_lock_sizes);
		mutex_exit(&chmp->chm_lock_mountfields);
		return EIO;
	}

	if (!eb->used_size) {
		/* nothing live in the block: go straight to erase check */
		dbg_gc("!eb->used_size\n");
		goto eraseit;
	}

	nref = eb->gc_node;
	/* remember dirty size to detect (in test_gcnode) whether the
	 * collection below actually obsoleted anything */
	gcblock_dirty = eb->dirty_size;

	/* advance the cursor past already-obsolete nodes */
	while(CHFS_REF_OBSOLETE(nref)) {
#ifdef DBG_MSG_GC
		if (nref == chmp->chm_blocks[nref->nref_lnr].last_node) {
			dbg_gc("THIS NODE IS THE LAST NODE OF ITS EB\n");
		}
#endif
		nref = node_next(nref);
		if (!nref) {
			/* used_size > 0 implies a live node must exist */
			eb->gc_node = nref;
			mutex_exit(&chmp->chm_lock_sizes);
			mutex_exit(&chmp->chm_lock_mountfields);
			panic("CHFS BUG - nref is NULL)\n");
		}
	}
	eb->gc_node = nref;
	KASSERT(nref->nref_lnr == chmp->chm_gcblock->lnr);

	if (!nref->nref_next) {
		/* node has no owner chain: copy it if pristine,
		 * otherwise just obsolete it */
		mutex_exit(&chmp->chm_lock_sizes);
		if (CHFS_REF_FLAGS(nref) == CHFS_PRISTINE_NODE_MASK) {
			chfs_gcollect_pristine(chmp, eb, NULL, nref);
		} else {
			chfs_mark_node_obsolete(chmp, nref);
		}
		goto lock_size;
	}

	dbg_gc("nref lnr: %u - offset: %u\n", nref->nref_lnr, nref->nref_offset);
	vc = chfs_nref_to_vc(nref);

	mutex_exit(&chmp->chm_lock_sizes);

	mutex_enter(&chmp->chm_lock_vnocache);

	dbg_gc("switch\n");
	switch(vc->state) {
	case VNO_STATE_CHECKEDABSENT:
		/* orphan owner: pristine nodes can be moved without an
		 * in-core inode, so mark the cache entry for GC */
		if (CHFS_REF_FLAGS(nref) == CHFS_PRISTINE_NODE_MASK) {
			chfs_vnode_cache_set_state(chmp, vc, VNO_STATE_GC);
		}
		break;

	case VNO_STATE_PRESENT:
		break;

	case VNO_STATE_UNCHECKED:
	case VNO_STATE_CHECKING:
	case VNO_STATE_GC:
		/* impossible: phase 1 finished and only this thread GCs */
		mutex_exit(&chmp->chm_lock_vnocache);
		mutex_exit(&chmp->chm_lock_mountfields);
		panic("CHFS BUG - vc state unchecked,"
		    " checking or gc (vno #%llu, num #%d)\n",
		    (unsigned long long)vc->vno, vc->state);

	case VNO_STATE_READING:
		/* inode being read in; back off and retry later */
		mutex_exit(&chmp->chm_lock_vnocache);
		/* XXX why do we need the delay here?! */
		kpause("chvncrea", true, mstohz(50), NULL);

		mutex_exit(&chmp->chm_lock_mountfields);
		return 0;
	}

	if (vc->state == VNO_STATE_GC) {
		dbg_gc("vc->state == VNO_STATE_GC\n");
		mutex_exit(&chmp->chm_lock_vnocache);
		ret = chfs_gcollect_pristine(chmp, eb, NULL, nref);

		/* XXX locking? */
		vc->state = VNO_STATE_CHECKEDABSENT;
		//TODO wake_up(&chmp->chm_vnocache_wq);
		/* EBADF means the node was not pristine after all: fall
		 * through and collect it the slow way */
		if (ret != EBADF)
			goto test_gcnode;
		mutex_enter(&chmp->chm_lock_vnocache);
	}

	/* snapshot the identity fields before dropping the lock */
	vno = vc->vno;
	pvno = vc->pvno;
	nlink = vc->nlink;
	mutex_exit(&chmp->chm_lock_vnocache);

	ip = chfs_gc_fetch_inode(chmp, vno, !(pvno | nlink));

	if (!ip) {
		/* inode unavailable right now; not an error, retry later */
		dbg_gc("!ip\n");
		ret = 0;
		goto lock_size;
	}

	chfs_gcollect_live(chmp, eb, nref, ip);

	chfs_gc_release_inode(chmp, ip);

test_gcnode:
	/* if no dirt was added and the node is still live, the
	 * collection made no progress */
	if (eb->dirty_size == gcblock_dirty &&
	    !CHFS_REF_OBSOLETE(eb->gc_node)) {
		dbg_gc("ERROR collecting node at %u failed.\n",
		    CHFS_GET_OFS(eb->gc_node->nref_offset));

		ret = ENOSPC;
	}

lock_size:
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	mutex_enter(&chmp->chm_lock_sizes);
eraseit:
	dbg_gc("eraseit\n");

	if (chmp->chm_gcblock) {
		/* sanity: the size accounting must cover the whole block */
		dbg_gc("eb used size = %u\n", chmp->chm_gcblock->used_size);
		dbg_gc("eb free size = %u\n", chmp->chm_gcblock->free_size);
		dbg_gc("eb dirty size = %u\n", chmp->chm_gcblock->dirty_size);
		dbg_gc("eb unchecked size = %u\n",
		    chmp->chm_gcblock->unchecked_size);
		dbg_gc("eb wasted size = %u\n", chmp->chm_gcblock->wasted_size);

		KASSERT(chmp->chm_gcblock->used_size + chmp->chm_gcblock->free_size +
		    chmp->chm_gcblock->dirty_size +
		    chmp->chm_gcblock->unchecked_size +
		    chmp->chm_gcblock->wasted_size == chmp->chm_ebh->eb_size);

	}

	/* fully dirty+wasted block: queue it for erase */
	if (chmp->chm_gcblock && chmp->chm_gcblock->dirty_size +
	    chmp->chm_gcblock->wasted_size == chmp->chm_ebh->eb_size) {
		dbg_gc("Block at leb #%u completely obsoleted by GC, "
		    "Moving to erase_pending_queue\n", chmp->chm_gcblock->lnr);
		TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
		    chmp->chm_gcblock, queue);
		chmp->chm_gcblock = NULL;
		chmp->chm_nr_erasable_blocks++;
		if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
			ret = chfs_remap_leb(chmp);
		}
	}

	mutex_exit(&chmp->chm_lock_sizes);
	mutex_exit(&chmp->chm_lock_mountfields);
	dbg_gc("return\n");
	return ret;
}
698 1.2.6.2 yamt
699 1.2.6.2 yamt
700 1.2.6.2 yamt int
701 1.2.6.2 yamt chfs_gcollect_pristine(struct chfs_mount *chmp, struct chfs_eraseblock *cheb,
702 1.2.6.2 yamt struct chfs_vnode_cache *chvc, struct chfs_node_ref *nref)
703 1.2.6.2 yamt {
704 1.2.6.2 yamt struct chfs_node_ref *newnref;
705 1.2.6.2 yamt struct chfs_flash_node_hdr *nhdr;
706 1.2.6.2 yamt struct chfs_flash_vnode *fvnode;
707 1.2.6.2 yamt struct chfs_flash_dirent_node *fdirent;
708 1.2.6.2 yamt struct chfs_flash_data_node *fdata;
709 1.2.6.2 yamt int ret, retries = 0;
710 1.2.6.2 yamt uint32_t ofs, crc;
711 1.2.6.2 yamt size_t totlen = chfs_nref_len(chmp, cheb, nref);
712 1.2.6.2 yamt char *data;
713 1.2.6.2 yamt struct iovec vec;
714 1.2.6.2 yamt size_t retlen;
715 1.2.6.2 yamt
716 1.2.6.2 yamt dbg_gc("gcollect_pristine\n");
717 1.2.6.2 yamt
718 1.2.6.2 yamt data = kmem_alloc(totlen, KM_SLEEP);
719 1.2.6.2 yamt if (!data)
720 1.2.6.2 yamt return ENOMEM;
721 1.2.6.2 yamt
722 1.2.6.2 yamt ofs = CHFS_GET_OFS(nref->nref_offset);
723 1.2.6.2 yamt
724 1.2.6.2 yamt ret = chfs_read_leb(chmp, nref->nref_lnr, data, ofs, totlen, &retlen);
725 1.2.6.2 yamt if (ret) {
726 1.2.6.2 yamt dbg_gc("reading error\n");
727 1.2.6.2 yamt return ret;
728 1.2.6.2 yamt }
729 1.2.6.2 yamt if (retlen != totlen) {
730 1.2.6.2 yamt dbg_gc("read size error\n");
731 1.2.6.2 yamt return EIO;
732 1.2.6.2 yamt }
733 1.2.6.2 yamt nhdr = (struct chfs_flash_node_hdr *)data;
734 1.2.6.2 yamt /* check the header */
735 1.2.6.2 yamt if (le16toh(nhdr->magic) != CHFS_FS_MAGIC_BITMASK) {
736 1.2.6.2 yamt dbg_gc("node header magic number error\n");
737 1.2.6.2 yamt return EBADF;
738 1.2.6.2 yamt }
739 1.2.6.2 yamt crc = crc32(0, (uint8_t *)nhdr, CHFS_NODE_HDR_SIZE - 4);
740 1.2.6.2 yamt if (crc != le32toh(nhdr->hdr_crc)) {
741 1.2.6.2 yamt dbg_gc("node header crc error\n");
742 1.2.6.2 yamt return EBADF;
743 1.2.6.2 yamt }
744 1.2.6.2 yamt
745 1.2.6.2 yamt switch(le16toh(nhdr->type)) {
746 1.2.6.2 yamt case CHFS_NODETYPE_VNODE:
747 1.2.6.2 yamt fvnode = (struct chfs_flash_vnode *)data;
748 1.2.6.2 yamt crc = crc32(0, (uint8_t *)fvnode, sizeof(struct chfs_flash_vnode) - 4);
749 1.2.6.2 yamt if (crc != le32toh(fvnode->node_crc)) {
750 1.2.6.2 yamt dbg_gc("vnode crc error\n");
751 1.2.6.2 yamt return EBADF;
752 1.2.6.2 yamt }
753 1.2.6.2 yamt break;
754 1.2.6.2 yamt case CHFS_NODETYPE_DIRENT:
755 1.2.6.2 yamt fdirent = (struct chfs_flash_dirent_node *)data;
756 1.2.6.2 yamt crc = crc32(0, (uint8_t *)fdirent, sizeof(struct chfs_flash_dirent_node) - 4);
757 1.2.6.2 yamt if (crc != le32toh(fdirent->node_crc)) {
758 1.2.6.2 yamt dbg_gc("dirent crc error\n");
759 1.2.6.2 yamt return EBADF;
760 1.2.6.2 yamt }
761 1.2.6.2 yamt crc = crc32(0, fdirent->name, fdirent->nsize);
762 1.2.6.2 yamt if (crc != le32toh(fdirent->name_crc)) {
763 1.2.6.2 yamt dbg_gc("dirent name crc error\n");
764 1.2.6.2 yamt return EBADF;
765 1.2.6.2 yamt }
766 1.2.6.2 yamt break;
767 1.2.6.2 yamt case CHFS_NODETYPE_DATA:
768 1.2.6.2 yamt fdata = (struct chfs_flash_data_node *)data;
769 1.2.6.2 yamt crc = crc32(0, (uint8_t *)fdata, sizeof(struct chfs_flash_data_node) - 4);
770 1.2.6.2 yamt if (crc != le32toh(fdata->node_crc)) {
771 1.2.6.2 yamt dbg_gc("data node crc error\n");
772 1.2.6.2 yamt return EBADF;
773 1.2.6.2 yamt }
774 1.2.6.2 yamt break;
775 1.2.6.2 yamt default:
776 1.2.6.2 yamt if (chvc) {
777 1.2.6.2 yamt dbg_gc("unknown node have vnode cache\n");
778 1.2.6.2 yamt return EBADF;
779 1.2.6.2 yamt }
780 1.2.6.2 yamt }
781 1.2.6.2 yamt /* CRC's OK, write node to its new place */
782 1.2.6.2 yamt retry:
783 1.2.6.2 yamt ret = chfs_reserve_space_gc(chmp, totlen);
784 1.2.6.2 yamt if (ret)
785 1.2.6.2 yamt return ret;
786 1.2.6.2 yamt
787 1.2.6.2 yamt newnref = chfs_alloc_node_ref(chmp->chm_nextblock);
788 1.2.6.2 yamt if (!newnref)
789 1.2.6.2 yamt return ENOMEM;
790 1.2.6.2 yamt
791 1.2.6.2 yamt ofs = chmp->chm_ebh->eb_size - chmp->chm_nextblock->free_size;
792 1.2.6.2 yamt newnref->nref_offset = ofs;
793 1.2.6.2 yamt
794 1.2.6.2 yamt vec.iov_base = (void *)data;
795 1.2.6.2 yamt vec.iov_len = totlen;
796 1.2.6.2 yamt mutex_enter(&chmp->chm_lock_sizes);
797 1.2.6.2 yamt ret = chfs_write_wbuf(chmp, &vec, 1, ofs, &retlen);
798 1.2.6.2 yamt
799 1.2.6.2 yamt if (ret || retlen != totlen) {
800 1.2.6.2 yamt chfs_err("error while writing out to the media\n");
801 1.2.6.2 yamt chfs_err("err: %d | size: %zu | retlen : %zu\n",
802 1.2.6.2 yamt ret, totlen, retlen);
803 1.2.6.2 yamt
804 1.2.6.2 yamt chfs_change_size_dirty(chmp, chmp->chm_nextblock, totlen);
805 1.2.6.2 yamt if (retries) {
806 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
807 1.2.6.2 yamt return EIO;
808 1.2.6.2 yamt }
809 1.2.6.2 yamt
810 1.2.6.2 yamt retries++;
811 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
812 1.2.6.2 yamt goto retry;
813 1.2.6.2 yamt }
814 1.2.6.2 yamt
815 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
816 1.2.6.2 yamt //TODO should we set free_size?
817 1.2.6.2 yamt chfs_mark_node_obsolete(chmp, nref);
818 1.2.6.2 yamt chfs_add_vnode_ref_to_vc(chmp, chvc, newnref);
819 1.2.6.2 yamt return 0;
820 1.2.6.2 yamt }
821 1.2.6.2 yamt
822 1.2.6.2 yamt
823 1.2.6.2 yamt int
824 1.2.6.2 yamt chfs_gcollect_live(struct chfs_mount *chmp,
825 1.2.6.2 yamt struct chfs_eraseblock *cheb, struct chfs_node_ref *nref,
826 1.2.6.2 yamt struct chfs_inode *ip)
827 1.2.6.2 yamt {
828 1.2.6.2 yamt struct chfs_node_frag *frag;
829 1.2.6.2 yamt struct chfs_full_dnode *fn = NULL;
830 1.2.6.2 yamt int start = 0, end = 0, nrfrags = 0;
831 1.2.6.2 yamt struct chfs_dirent *fd = NULL;
832 1.2.6.2 yamt int ret = 0;
833 1.2.6.2 yamt bool is_dirent;
834 1.2.6.2 yamt
835 1.2.6.2 yamt dbg_gc("gcollect_live\n");
836 1.2.6.2 yamt
837 1.2.6.2 yamt if (chmp->chm_gcblock != cheb) {
838 1.2.6.2 yamt dbg_gc("GC block is no longer gcblock. Restart.\n");
839 1.2.6.2 yamt goto upnout;
840 1.2.6.2 yamt }
841 1.2.6.2 yamt
842 1.2.6.2 yamt if (CHFS_REF_OBSOLETE(nref)) {
843 1.2.6.2 yamt dbg_gc("node to be GC'd was obsoleted in the meantime.\n");
844 1.2.6.2 yamt goto upnout;
845 1.2.6.2 yamt }
846 1.2.6.2 yamt
847 1.2.6.2 yamt /* It's a vnode? */
848 1.2.6.2 yamt if (ip->chvc->v == nref) {
849 1.2.6.2 yamt chfs_gcollect_vnode(chmp, ip);
850 1.2.6.2 yamt goto upnout;
851 1.2.6.2 yamt }
852 1.2.6.2 yamt
853 1.2.6.2 yamt /* find fn */
854 1.2.6.2 yamt dbg_gc("find full dnode\n");
855 1.2.6.2 yamt for(frag = frag_first(&ip->fragtree);
856 1.2.6.2 yamt frag; frag = frag_next(&ip->fragtree, frag)) {
857 1.2.6.2 yamt if (frag->node && frag->node->nref == nref) {
858 1.2.6.2 yamt fn = frag->node;
859 1.2.6.2 yamt end = frag->ofs + frag->size;
860 1.2.6.2 yamt if (!nrfrags++)
861 1.2.6.2 yamt start = frag->ofs;
862 1.2.6.2 yamt if (nrfrags == frag->node->frags)
863 1.2.6.2 yamt break;
864 1.2.6.2 yamt }
865 1.2.6.2 yamt }
866 1.2.6.2 yamt
867 1.2.6.2 yamt /* It's a pristine node, or dnode (or hole? XXX have we hole nodes?) */
868 1.2.6.2 yamt if (fn) {
869 1.2.6.2 yamt if (CHFS_REF_FLAGS(nref) == CHFS_PRISTINE_NODE_MASK) {
870 1.2.6.2 yamt ret = chfs_gcollect_pristine(chmp,
871 1.2.6.2 yamt cheb, ip->chvc, nref);
872 1.2.6.2 yamt if (!ret) {
873 1.2.6.2 yamt frag->node->nref = ip->chvc->v;
874 1.2.6.2 yamt }
875 1.2.6.2 yamt if (ret != EBADF)
876 1.2.6.2 yamt goto upnout;
877 1.2.6.2 yamt }
878 1.2.6.2 yamt //ret = chfs_gcollect_hole(chmp, cheb, ip, fn, start, end);
879 1.2.6.2 yamt ret = chfs_gcollect_dnode(chmp, cheb, ip, fn, start, end);
880 1.2.6.2 yamt goto upnout;
881 1.2.6.2 yamt }
882 1.2.6.2 yamt
883 1.2.6.2 yamt
884 1.2.6.2 yamt /* It's a dirent? */
885 1.2.6.2 yamt dbg_gc("find full dirent\n");
886 1.2.6.2 yamt is_dirent = false;
887 1.2.6.2 yamt TAILQ_FOREACH(fd, &ip->dents, fds) {
888 1.2.6.2 yamt if (fd->nref == nref) {
889 1.2.6.2 yamt is_dirent = true;
890 1.2.6.2 yamt break;
891 1.2.6.2 yamt }
892 1.2.6.2 yamt }
893 1.2.6.2 yamt
894 1.2.6.2 yamt if (is_dirent && fd->vno) {
895 1.2.6.2 yamt ret = chfs_gcollect_dirent(chmp, cheb, ip, fd);
896 1.2.6.2 yamt } else if (is_dirent) {
897 1.2.6.2 yamt ret = chfs_gcollect_deletion_dirent(chmp, cheb, ip, fd);
898 1.2.6.2 yamt } else {
899 1.2.6.2 yamt dbg_gc("Nref at leb #%u offset 0x%08x wasn't in node list"
900 1.2.6.2 yamt " for ino #%llu\n",
901 1.2.6.2 yamt nref->nref_lnr, CHFS_GET_OFS(nref->nref_offset),
902 1.2.6.2 yamt (unsigned long long)ip->ino);
903 1.2.6.2 yamt if (CHFS_REF_OBSOLETE(nref)) {
904 1.2.6.2 yamt dbg_gc("But it's obsolete so we don't mind"
905 1.2.6.2 yamt " too much.\n");
906 1.2.6.2 yamt }
907 1.2.6.2 yamt }
908 1.2.6.2 yamt
909 1.2.6.2 yamt upnout:
910 1.2.6.2 yamt return ret;
911 1.2.6.2 yamt }
912 1.2.6.2 yamt
913 1.2.6.2 yamt int
914 1.2.6.2 yamt chfs_gcollect_vnode(struct chfs_mount *chmp, struct chfs_inode *ip)
915 1.2.6.2 yamt {
916 1.2.6.2 yamt int ret;
917 1.2.6.2 yamt dbg_gc("gcollect_vnode\n");
918 1.2.6.2 yamt
919 1.2.6.2 yamt ret = chfs_write_flash_vnode(chmp, ip, ALLOC_GC);
920 1.2.6.2 yamt
921 1.2.6.2 yamt return ret;
922 1.2.6.2 yamt }
923 1.2.6.2 yamt
924 1.2.6.2 yamt int
925 1.2.6.2 yamt chfs_gcollect_dirent(struct chfs_mount *chmp,
926 1.2.6.2 yamt struct chfs_eraseblock *cheb, struct chfs_inode *parent,
927 1.2.6.2 yamt struct chfs_dirent *fd)
928 1.2.6.2 yamt {
929 1.2.6.2 yamt struct vnode *vnode = NULL;
930 1.2.6.2 yamt struct chfs_inode *ip;
931 1.2.6.2 yamt struct chfs_node_ref *prev;
932 1.2.6.2 yamt dbg_gc("gcollect_dirent\n");
933 1.2.6.2 yamt
934 1.2.6.2 yamt vnode = chfs_vnode_lookup(chmp, fd->vno);
935 1.2.6.2 yamt
936 1.2.6.2 yamt /* XXX maybe KASSERT or panic on this? */
937 1.2.6.2 yamt if (vnode == NULL) {
938 1.2.6.2 yamt return ENOENT;
939 1.2.6.2 yamt }
940 1.2.6.2 yamt
941 1.2.6.2 yamt ip = VTOI(vnode);
942 1.2.6.2 yamt
943 1.2.6.2 yamt prev = parent->chvc->dirents;
944 1.2.6.2 yamt if (prev == fd->nref) {
945 1.2.6.2 yamt parent->chvc->dirents = prev->nref_next;
946 1.2.6.2 yamt dbg_gc("fd nref removed from dirents list\n");
947 1.2.6.2 yamt prev = NULL;
948 1.2.6.2 yamt }
949 1.2.6.2 yamt while (prev) {
950 1.2.6.2 yamt if (prev->nref_next == fd->nref) {
951 1.2.6.2 yamt prev->nref_next = fd->nref->nref_next;
952 1.2.6.2 yamt dbg_gc("fd nref removed from dirents list\n");
953 1.2.6.2 yamt break;
954 1.2.6.2 yamt }
955 1.2.6.2 yamt prev = prev->nref_next;
956 1.2.6.2 yamt }
957 1.2.6.2 yamt
958 1.2.6.2 yamt prev = fd->nref;
959 1.2.6.2 yamt chfs_mark_node_obsolete(chmp, fd->nref);
960 1.2.6.2 yamt return chfs_write_flash_dirent(chmp,
961 1.2.6.2 yamt parent, ip, fd, fd->vno, ALLOC_GC);
962 1.2.6.2 yamt }
963 1.2.6.2 yamt
/*
 * chfs_gcollect_deletion_dirent - garbage collect a deletion dirent (an
 * entry with vno == 0 that marks a removed name).  The deletion dirent
 * must be kept (rewritten at the GC point) only while an older, obsolete
 * dirent with the same name still exists on flash; once no such node
 * remains, the deletion dirent itself can be dropped.
 * Returns 0 on success or an errno value from the flash write.
 */
int
chfs_gcollect_deletion_dirent(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, struct chfs_inode *parent,
    struct chfs_dirent *fd)
{
	struct chfs_flash_dirent_node chfdn;
	struct chfs_node_ref *nref;
	size_t retlen, name_len, nref_len;
	uint32_t name_crc;

	int ret;

	struct vnode *vnode = NULL;

	dbg_gc("gcollect_deletion_dirent\n");

	/* name length and CRC are used to match candidate dirents below */
	name_len = strlen(fd->name);
	name_crc = crc32(0, fd->name, name_len);

	nref_len = chfs_nref_len(chmp, cheb, fd->nref);

	/* NOTE(review): the lookup result is never used below; verify
	 * whether this call (or its side effect) is actually needed. */
	vnode = chfs_vnode_lookup(chmp, fd->vno);

	//dbg_gc("ip from vnode\n");
	//VFS_VGET(chmp->chm_fsmp, fd->vno, &vnode);
	//ip = VTOI(vnode);
	//vput(vnode);

	//dbg_gc("mutex enter erase_completion_lock\n");

	// dbg_gc("alloc chfdn\n");
	// chfdn = kmem_alloc(nref_len, KM_SLEEP);
	// if (!chfdn)
	// return ENOMEM;

	/*
	 * Walk every node ref on the parent's dirent list looking for an
	 * obsolete dirent with the same name as fd.  The list is terminated
	 * by a pointer back to the vnode cache itself.
	 */
	for (nref = parent->chvc->dirents;
	     nref != (void*)parent->chvc;
	     nref = nref->nref_next) {

		/* only obsolete nodes are of interest */
		if (!CHFS_REF_OBSOLETE(nref))
			continue;

		/* if node refs have different length, skip */
		if (chfs_nref_len(chmp, NULL, nref) != nref_len)
			continue;

		/* skip the deletion dirent itself */
		if (CHFS_GET_OFS(nref->nref_offset) ==
		    CHFS_GET_OFS(fd->nref->nref_offset)) {
			continue;
		}

		/* read the candidate dirent node from flash */
		ret = chfs_read_leb(chmp,
		    nref->nref_lnr, (void*)&chfdn, CHFS_GET_OFS(nref->nref_offset),
		    nref_len, &retlen);

		if (ret) {
			dbg_gc("Read error: %d\n", ret);
			continue;
		}

		if (retlen != nref_len) {
			dbg_gc("Error reading node:"
			    " read: %zu insted of: %zu\n", retlen, nref_len);
			continue;
		}

		/* if node type doesn't match, skip */
		if (le16toh(chfdn.type) != CHFS_NODETYPE_DIRENT)
			continue;

		/* if crc doesn't match, skip */
		if (le32toh(chfdn.name_crc) != name_crc)
			continue;

		/* if length of name different, or this is an another deletion
		 * dirent, skip
		 */
		if (chfdn.nsize != name_len || !le64toh(chfdn.vno))
			continue;

		/* check actual name */
		if (memcmp(chfdn.name, fd->name, name_len))
			continue;

		// kmem_free(chfdn, nref_len);

		/*
		 * Found a matching obsolete dirent still on flash: the
		 * deletion dirent is still needed, rewrite it at the GC
		 * point and obsolete the old copy.
		 */
		chfs_mark_node_obsolete(chmp, fd->nref);
		return chfs_write_flash_dirent(chmp,
		    parent, NULL, fd, fd->vno, ALLOC_GC);
	}

	// kmem_free(chfdn, nref_len);

	/* no matching dirent remains on flash: drop the deletion dirent */
	TAILQ_REMOVE(&parent->dents, fd, fds);
	chfs_free_dirent(fd);
	return 0;
}
1062 1.2.6.2 yamt
1063 1.2.6.2 yamt int
1064 1.2.6.2 yamt chfs_gcollect_dnode(struct chfs_mount *chmp,
1065 1.2.6.2 yamt struct chfs_eraseblock *orig_cheb, struct chfs_inode *ip,
1066 1.2.6.2 yamt struct chfs_full_dnode *fn, uint32_t orig_start, uint32_t orig_end)
1067 1.2.6.2 yamt {
1068 1.2.6.2 yamt struct chfs_node_ref *nref, *prev;
1069 1.2.6.2 yamt struct chfs_full_dnode *newfn;
1070 1.2.6.2 yamt struct chfs_flash_data_node *fdnode;
1071 1.2.6.2 yamt int ret = 0, retries = 0;
1072 1.2.6.2 yamt uint32_t totlen;
1073 1.2.6.2 yamt char *data = NULL;
1074 1.2.6.2 yamt struct iovec vec;
1075 1.2.6.2 yamt size_t retlen;
1076 1.2.6.2 yamt dbg_gc("gcollect_dnode\n");
1077 1.2.6.2 yamt
1078 1.2.6.2 yamt //uint32_t used_size;
1079 1.2.6.2 yamt
1080 1.2.6.2 yamt /* TODO GC merging frags, should we use it?
1081 1.2.6.2 yamt
1082 1.2.6.2 yamt uint32_t start, end;
1083 1.2.6.2 yamt
1084 1.2.6.2 yamt start = orig_start;
1085 1.2.6.2 yamt end = orig_end;
1086 1.2.6.2 yamt
1087 1.2.6.2 yamt if (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks > chmp->chm_resv_blocks_gcmerge) {
1088 1.2.6.2 yamt struct chfs_node_frag *frag;
1089 1.2.6.2 yamt uint32_t min, max;
1090 1.2.6.2 yamt
1091 1.2.6.2 yamt min = start & (PAGE_CACHE_SIZE-1);
1092 1.2.6.2 yamt max = min + PAGE_CACHE_SIZE;
1093 1.2.6.2 yamt
1094 1.2.6.2 yamt frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&ip->i_chfs_ext.fragtree, &start);
1095 1.2.6.2 yamt KASSERT(frag->ofs == start);
1096 1.2.6.2 yamt
1097 1.2.6.2 yamt while ((frag = frag_prev(&ip->i_chfs_ext.fragtree, frag)) && frag->ofs >= min) {
1098 1.2.6.2 yamt if (frag->ofs > min) {
1099 1.2.6.2 yamt start = frag->ofs;
1100 1.2.6.2 yamt continue;
1101 1.2.6.2 yamt }
1102 1.2.6.2 yamt
1103 1.2.6.2 yamt if (!frag->node || !frag->node->nref) {
1104 1.2.6.2 yamt break;
1105 1.2.6.2 yamt } else {
1106 1.2.6.2 yamt struct chfs_node_ref *nref = frag->node->nref;
1107 1.2.6.2 yamt struct chfs_eraseblock *cheb;
1108 1.2.6.2 yamt
1109 1.2.6.2 yamt cheb = &chmp->chm_blocks[nref->nref_lnr];
1110 1.2.6.2 yamt
1111 1.2.6.2 yamt if (cheb == chmp->chm_gcblock)
1112 1.2.6.2 yamt start = frag->ofs;
1113 1.2.6.2 yamt
1114 1.2.6.2 yamt //TODO is this a clean block?
1115 1.2.6.2 yamt
1116 1.2.6.2 yamt start = frag->ofs;
1117 1.2.6.2 yamt break;
1118 1.2.6.2 yamt }
1119 1.2.6.2 yamt }
1120 1.2.6.2 yamt
1121 1.2.6.2 yamt end--;
1122 1.2.6.2 yamt frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&ip->i_chfs_ext.fragtree, &(end));
1123 1.2.6.2 yamt
1124 1.2.6.2 yamt while ((frag = frag_next(&ip->i_chfs_ext.fragtree, frag)) && (frag->ofs + frag->size <= max)) {
1125 1.2.6.2 yamt if (frag->ofs + frag->size < max) {
1126 1.2.6.2 yamt end = frag->ofs + frag->size;
1127 1.2.6.2 yamt continue;
1128 1.2.6.2 yamt }
1129 1.2.6.2 yamt
1130 1.2.6.2 yamt if (!frag->node || !frag->node->nref) {
1131 1.2.6.2 yamt break;
1132 1.2.6.2 yamt } else {
1133 1.2.6.2 yamt struct chfs_node_ref *nref = frag->node->nref;
1134 1.2.6.2 yamt struct chfs_eraseblock *cheb;
1135 1.2.6.2 yamt
1136 1.2.6.2 yamt cheb = &chmp->chm_blocks[nref->nref_lnr];
1137 1.2.6.2 yamt
1138 1.2.6.2 yamt if (cheb == chmp->chm_gcblock)
1139 1.2.6.2 yamt end = frag->ofs + frag->size;
1140 1.2.6.2 yamt
1141 1.2.6.2 yamt //TODO is this a clean block?
1142 1.2.6.2 yamt
1143 1.2.6.2 yamt end = frag->ofs + frag->size;
1144 1.2.6.2 yamt break;
1145 1.2.6.2 yamt }
1146 1.2.6.2 yamt }
1147 1.2.6.2 yamt
1148 1.2.6.2 yamt KASSERT(end <=
1149 1.2.6.2 yamt frag_last(&ip->i_chfs_ext.fragtree)->ofs +
1150 1.2.6.2 yamt frag_last(&ip->i_chfs_ext.fragtree)->size);
1151 1.2.6.2 yamt KASSERT(end >= orig_end);
1152 1.2.6.2 yamt KASSERT(start <= orig_start);
1153 1.2.6.2 yamt }
1154 1.2.6.2 yamt */
1155 1.2.6.2 yamt KASSERT(orig_cheb->lnr == fn->nref->nref_lnr);
1156 1.2.6.2 yamt totlen = chfs_nref_len(chmp, orig_cheb, fn->nref);
1157 1.2.6.2 yamt data = kmem_alloc(totlen, KM_SLEEP);
1158 1.2.6.2 yamt
1159 1.2.6.2 yamt ret = chfs_read_leb(chmp, fn->nref->nref_lnr, data, fn->nref->nref_offset,
1160 1.2.6.2 yamt totlen, &retlen);
1161 1.2.6.2 yamt
1162 1.2.6.2 yamt fdnode = (struct chfs_flash_data_node *)data;
1163 1.2.6.2 yamt fdnode->version = htole64(++ip->chvc->highest_version);
1164 1.2.6.2 yamt fdnode->node_crc = htole32(crc32(0, (uint8_t *)fdnode,
1165 1.2.6.2 yamt sizeof(*fdnode) - 4));
1166 1.2.6.2 yamt
1167 1.2.6.2 yamt vec.iov_base = (void *)data;
1168 1.2.6.2 yamt vec.iov_len = totlen;
1169 1.2.6.2 yamt
1170 1.2.6.2 yamt retry:
1171 1.2.6.2 yamt ret = chfs_reserve_space_gc(chmp, totlen);
1172 1.2.6.2 yamt if (ret)
1173 1.2.6.2 yamt goto out;
1174 1.2.6.2 yamt
1175 1.2.6.2 yamt nref = chfs_alloc_node_ref(chmp->chm_nextblock);
1176 1.2.6.2 yamt if (!nref) {
1177 1.2.6.2 yamt ret = ENOMEM;
1178 1.2.6.2 yamt goto out;
1179 1.2.6.2 yamt }
1180 1.2.6.2 yamt
1181 1.2.6.2 yamt mutex_enter(&chmp->chm_lock_sizes);
1182 1.2.6.2 yamt
1183 1.2.6.2 yamt nref->nref_offset = chmp->chm_ebh->eb_size - chmp->chm_nextblock->free_size;
1184 1.2.6.2 yamt KASSERT(nref->nref_offset % 4 == 0);
1185 1.2.6.2 yamt chfs_change_size_free(chmp, chmp->chm_nextblock, -totlen);
1186 1.2.6.2 yamt
1187 1.2.6.2 yamt ret = chfs_write_wbuf(chmp, &vec, 1, nref->nref_offset, &retlen);
1188 1.2.6.2 yamt if (ret || retlen != totlen) {
1189 1.2.6.2 yamt chfs_err("error while writing out to the media\n");
1190 1.2.6.2 yamt chfs_err("err: %d | size: %d | retlen : %zu\n",
1191 1.2.6.2 yamt ret, totlen, retlen);
1192 1.2.6.2 yamt chfs_change_size_dirty(chmp, chmp->chm_nextblock, totlen);
1193 1.2.6.2 yamt if (retries) {
1194 1.2.6.2 yamt ret = EIO;
1195 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
1196 1.2.6.2 yamt goto out;
1197 1.2.6.2 yamt }
1198 1.2.6.2 yamt
1199 1.2.6.2 yamt retries++;
1200 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
1201 1.2.6.2 yamt goto retry;
1202 1.2.6.2 yamt }
1203 1.2.6.2 yamt
1204 1.2.6.2 yamt dbg_gc("new nref lnr: %u - offset: %u\n", nref->nref_lnr, nref->nref_offset);
1205 1.2.6.2 yamt
1206 1.2.6.2 yamt chfs_change_size_used(chmp, &chmp->chm_blocks[nref->nref_lnr], totlen);
1207 1.2.6.2 yamt mutex_exit(&chmp->chm_lock_sizes);
1208 1.2.6.2 yamt KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);
1209 1.2.6.2 yamt
1210 1.2.6.2 yamt newfn = chfs_alloc_full_dnode();
1211 1.2.6.2 yamt newfn->nref = nref;
1212 1.2.6.2 yamt newfn->ofs = fn->ofs;
1213 1.2.6.2 yamt newfn->size = fn->size;
1214 1.2.6.2 yamt newfn->frags = fn->frags;
1215 1.2.6.2 yamt
1216 1.2.6.2 yamt //TODO should we remove fd from dnode list?
1217 1.2.6.2 yamt
1218 1.2.6.2 yamt prev = ip->chvc->dnode;
1219 1.2.6.2 yamt if (prev == fn->nref) {
1220 1.2.6.2 yamt ip->chvc->dnode = prev->nref_next;
1221 1.2.6.2 yamt prev = NULL;
1222 1.2.6.2 yamt }
1223 1.2.6.2 yamt while (prev) {
1224 1.2.6.2 yamt if (prev->nref_next == fn->nref) {
1225 1.2.6.2 yamt prev->nref_next = fn->nref->nref_next;
1226 1.2.6.2 yamt break;
1227 1.2.6.2 yamt }
1228 1.2.6.2 yamt prev = prev->nref_next;
1229 1.2.6.2 yamt }
1230 1.2.6.2 yamt
1231 1.2.6.2 yamt chfs_add_full_dnode_to_inode(chmp, ip, newfn);
1232 1.2.6.2 yamt chfs_add_node_to_list(chmp,
1233 1.2.6.2 yamt ip->chvc, newfn->nref, &ip->chvc->dnode);
1234 1.2.6.2 yamt
1235 1.2.6.2 yamt out:
1236 1.2.6.2 yamt kmem_free(data, totlen);
1237 1.2.6.2 yamt return ret;
1238 1.2.6.2 yamt }
1239