1 /*	$NetBSD: vfs_wapbl.c,v 1.1.2.7 2008/06/19 03:27:23 simonb Exp $	*/
2
3 /*-
4 * Copyright (c) 2003,2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33  * This implements file system independent write-ahead logging (WAPBL).
34 */
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.1.2.7 2008/06/19 03:27:23 simonb Exp $");
37
38 #include <sys/param.h>
39
40 #ifdef _KERNEL
41 #include <sys/param.h>
42 #include <sys/namei.h>
43 #include <sys/proc.h>
44 #include <sys/uio.h>
45 #include <sys/vnode.h>
46 #include <sys/file.h>
47 #include <sys/malloc.h>
48 #include <sys/resourcevar.h>
49 #include <sys/conf.h>
50 #include <sys/mount.h>
51 #include <sys/kernel.h>
52 #include <sys/kauth.h>
53 #include <sys/mutex.h>
54 #include <sys/wapbl.h>
55
56 #if WAPBL_UVM_ALLOC
57 #include <uvm/uvm.h>
58 #endif
59
60 #include <miscfs/specfs/specdev.h>
61
62 MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
63 #define wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
64 #define wapbl_free(a) free((a), M_WAPBL)
65 #define wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO)
66
67 #else /* !_KERNEL */
68 #include <assert.h>
69 #include <errno.h>
70 #include <stdio.h>
71 #include <stdbool.h>
72 #include <stdlib.h>
73 #include <string.h>
74
75 #include <sys/time.h>
76 #include <sys/wapbl.h>
77
78 #define KDASSERT(x) assert(x)
79 #define KASSERT(x) assert(x)
80 #define wapbl_malloc(s) malloc(s)
81 #define wapbl_free(a) free(a)
82 #define wapbl_calloc(n, s) calloc((n), (s))
83
84 #endif /* !_KERNEL */
85
86 /*
87 * INTERNAL DATA STRUCTURES
88 */
89
90 /*
91 * This structure holds per-mount log information.
92 *
93 * Legend: a = atomic access only
94 * r = read-only after init
95 * l = rwlock held
96 * m = mutex held
97 * u = unlocked access ok
98 * b = bufcache_lock held
99 */
100 struct wapbl {
101 struct vnode *wl_logvp; /* r: log here */
102 struct vnode *wl_devvp; /* r: log on this device */
103 struct mount *wl_mount; /* r: mountpoint wl is associated with */
104 daddr_t wl_logpbn; /* r: Physical block number of start of log */
105 int wl_log_dev_bshift; /* r: logarithm of device block size of log
106 device */
107 int wl_fs_dev_bshift; /* r: logarithm of device block size of
108 filesystem device */
109
110 unsigned wl_lock_count; /* a: Count of transactions in progress */
111
112 size_t wl_circ_size; /* r: Number of bytes in buffer of log */
113 size_t wl_circ_off; /* r: Number of bytes reserved at start */
114
115 size_t wl_bufcount_max; /* r: Number of buffers reserved for log */
116 size_t wl_bufbytes_max; /* r: Number of buf bytes reserved for log */
117
118 off_t wl_head; /* l: Byte offset of log head */
119 off_t wl_tail; /* l: Byte offset of log tail */
120 /*
121 * head == tail == 0 means log is empty
122 * head == tail != 0 means log is full
123 * see assertions in wapbl_advance() for other boundary conditions.
124 	 * Only truncate moves the tail, except when flush sets it to
125 	 * wl_header_size.  Only flush moves the head, except when truncate
126 * sets it to 0.
127 */
128
129 struct wapbl_wc_header *wl_wc_header; /* l */
130 	void *wl_wc_scratch;	/* l:	   scratch space (XXX: why?!?) */
131
132 kmutex_t wl_mtx; /* u: short-term lock */
133 krwlock_t wl_rwlock; /* u: File system transaction lock */
134
135 /*
136 * Must be held while accessing
137 * wl_count or wl_bufs or head or tail
138 */
139
140 /*
141 * Callback called from within the flush routine to flush any extra
142 * bits. Note that flush may be skipped without calling this if
143 * there are no outstanding buffers in the transaction.
144 */
145 wapbl_flush_fn_t wl_flush; /* r */
146 wapbl_flush_fn_t wl_flush_abort;/* r */
147
148 size_t wl_bufbytes; /* m: Byte count of pages in wl_bufs */
149 size_t wl_bufcount; /* m: Count of buffers in wl_bufs */
150 size_t wl_bcount; /* m: Total bcount of wl_bufs */
151
152 LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */
153
154 kcondvar_t wl_reclaimable_cv; /* m (obviously) */
155 size_t wl_reclaimable_bytes; /* m: Amount of space available for
156 reclamation by truncate */
157 int wl_error_count; /* m: # of wl_entries with errors */
158 size_t wl_reserved_bytes; /* never truncate log smaller than this */
159
160 #ifdef WAPBL_DEBUG_BUFBYTES
161 size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
162 #endif
163
164 daddr_t *wl_deallocblks;/* l: address of block */
165 	int *wl_dealloclens;	/* l:	size of block (fragments, remember) */
166 int wl_dealloccnt; /* l: total count */
167 int wl_dealloclim; /* l: max count */
168
169 /* hashtable of inode numbers for allocated but unlinked inodes */
170 /* synch ??? */
171 LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
172 u_long wl_inohashmask;
173 int wl_inohashcnt;
174
175 SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
176 accounting */
177 };
178
179 #ifdef WAPBL_DEBUG_PRINT
180 int wapbl_debug_print = WAPBL_DEBUG_PRINT;
181 #endif
182
183 /****************************************************************/
184 #ifdef _KERNEL
185
186 #ifdef WAPBL_DEBUG
187 struct wapbl *wapbl_debug_wl;
188 #endif
189
190 static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
191 static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
192 static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
193 static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
194 #endif /* _KERNEL */
195
196 static int wapbl_replay_prescan(struct wapbl_replay *wr);
197 static int wapbl_replay_get_inodes(struct wapbl_replay *wr);
198
199 static __inline size_t wapbl_space_free(size_t avail, off_t head,
200 off_t tail);
201 static __inline size_t wapbl_space_used(size_t avail, off_t head,
202 off_t tail);
203
204 #ifdef _KERNEL
205
206 #define WAPBL_INODETRK_SIZE 83
207 static int wapbl_ino_pool_refcount;
208 static struct pool wapbl_ino_pool;
209 struct wapbl_ino {
210 LIST_ENTRY(wapbl_ino) wi_hash;
211 ino_t wi_ino;
212 mode_t wi_mode;
213 };
214
215 static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
216 static void wapbl_inodetrk_free(struct wapbl *wl);
217 static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);
218
219 static size_t wapbl_transaction_len(struct wapbl *wl);
220 static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);
221
222 /*
223 * This is useful for debugging. If set, the log will
224 * only be truncated when necessary.
225 */
226 int wapbl_lazy_truncate = 0;
227
228 struct wapbl_ops wapbl_ops = {
229 .wo_wapbl_discard = wapbl_discard,
230 .wo_wapbl_replay_isopen = wapbl_replay_isopen1,
231 .wo_wapbl_replay_read = wapbl_replay_read,
232 .wo_wapbl_add_buf = wapbl_add_buf,
233 .wo_wapbl_remove_buf = wapbl_remove_buf,
234 .wo_wapbl_resize_buf = wapbl_resize_buf,
235 .wo_wapbl_begin = wapbl_begin,
236 .wo_wapbl_end = wapbl_end,
237 .wo_wapbl_junlock_assert= wapbl_junlock_assert,
238
239 /* XXX: the following is only used to say "this is a wapbl buf" */
240 .wo_wapbl_biodone = wapbl_biodone,
241 };
242
243 void
244 wapbl_init()
245 {
246
247 malloc_type_attach(M_WAPBL);
248 }
249
250 int
251 wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
252 daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
253 wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
254 {
255 struct wapbl *wl;
256 struct vnode *devvp;
257 daddr_t logpbn;
258 int error;
259 int log_dev_bshift = DEV_BSHIFT;
260 int fs_dev_bshift = DEV_BSHIFT;
261 int run;
262
263 WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
264 " count=%zu blksize=%zu\n", vp, off, count, blksize));
265
266 if (log_dev_bshift > fs_dev_bshift) {
267 WAPBL_PRINTF(WAPBL_PRINT_OPEN,
268 ("wapbl: log device's block size cannot be larger "
269 "than filesystem's\n"));
270 /*
271 * Not currently implemented, although it could be if
272 * needed someday.
273 */
274 return ENOSYS;
275 }
276
277 if (off < 0)
278 return EINVAL;
279
280 if (blksize < DEV_BSIZE)
281 return EINVAL;
282 if (blksize % DEV_BSIZE)
283 return EINVAL;
284
285 /* XXXTODO: verify that the full load is writable */
286
287 /*
288 * XXX check for minimum log size
289 * minimum is governed by minimum amount of space
290 * to complete a transaction. (probably truncate)
291 */
292 /* XXX for now pick something minimal */
293 if ((count * blksize) < MAXPHYS) {
294 return ENOSPC;
295 }
296
297 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
298 return error;
299 }
300
301 wl = wapbl_calloc(1, sizeof(*wl));
302 rw_init(&wl->wl_rwlock);
303 mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
304 cv_init(&wl->wl_reclaimable_cv, "wapblrec");
305 LIST_INIT(&wl->wl_bufs);
306 SIMPLEQ_INIT(&wl->wl_entries);
307
308 wl->wl_logvp = vp;
309 wl->wl_devvp = devvp;
310 wl->wl_mount = mp;
311 wl->wl_logpbn = logpbn;
312 wl->wl_log_dev_bshift = log_dev_bshift;
313 wl->wl_fs_dev_bshift = fs_dev_bshift;
314
315 wl->wl_flush = flushfn;
316 wl->wl_flush_abort = flushabortfn;
317
318 /* Reserve two log device blocks for the commit headers */
319 wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
320 wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
321 /* truncate the log usage to a multiple of log_dev_bshift */
322 wl->wl_circ_size >>= wl->wl_log_dev_bshift;
323 wl->wl_circ_size <<= wl->wl_log_dev_bshift;
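	/*
	 * Resulting on-disk log layout, relative to wl_logpbn (sketch; the
	 * byte offsets are purely illustrative for a 512-byte log device
	 * block, i.e. wl_log_dev_bshift == 9):
	 *
	 *	[ commit header 0 | commit header 1 | circular data area ... ]
	 *	0                  512               wl_circ_off = 1024
	 *
	 * wapbl_write_commit() alternates between the two header blocks.
	 */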
324
325 /*
326 * wl_bufbytes_max limits the size of the in memory transaction space.
327 * - Since buffers are allocated and accounted for in units of
328 * PAGE_SIZE it is required to be a multiple of PAGE_SIZE
329 * (i.e. 1<<PAGE_SHIFT)
330 * - Since the log device has to be written in units of
331 	 *  1<<wl_log_dev_bshift it is required to be a multiple of
332 	 *  1<<wl_log_dev_bshift.
333 	 *  - Since the filesystem will provide data in units of 1<<wl_fs_dev_bshift,
334 	 *  it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
335 	 *  Therefore it must be a multiple of the least common multiple of those
336 * three quantities. Fortunately, all of those quantities are
337 * guaranteed to be a power of two, and the least common multiple of
338 * a set of numbers which are all powers of two is simply the maximum
339 * of those numbers. Finally, the maximum logarithm of a power of two
340 * is the same as the log of the maximum power of two. So we can do
341 * the following operations to size wl_bufbytes_max:
342 */
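	/*
	 * For example (illustrative values): with PAGE_SHIFT = 12 and both
	 * wl_log_dev_bshift and wl_fs_dev_bshift equal to 9, the binding
	 * constraint is PAGE_SIZE, so the shifts below simply round
	 * wl_bufbytes_max down to a multiple of 4096.
	 */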
343
344 /* XXX fix actual number of pages reserved per filesystem. */
345 wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);
346
347 /* Round wl_bufbytes_max to the largest power of two constraint */
348 wl->wl_bufbytes_max >>= PAGE_SHIFT;
349 wl->wl_bufbytes_max <<= PAGE_SHIFT;
350 wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
351 wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
352 wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
353 wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
354
355 /* XXX maybe use filesystem fragment size instead of 1024 */
356 /* XXX fix actual number of buffers reserved per filesystem. */
357 wl->wl_bufcount_max = (nbuf / 2) * 1024;
358
359 /* XXX tie this into resource estimation */
360 wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);
361
362 #if WAPBL_UVM_ALLOC
363 wl->wl_deallocblks = (void *) uvm_km_zalloc(kernel_map,
364 round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
365 KASSERT(wl->wl_deallocblks != NULL);
366 wl->wl_dealloclens = (void *) uvm_km_zalloc(kernel_map,
367 round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
368 KASSERT(wl->wl_dealloclens != NULL);
369 #else
370 wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
371 wl->wl_dealloclim);
372 wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
373 wl->wl_dealloclim);
374 #endif
375
376 wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);
377
378 /* Initialize the commit header */
379 {
380 struct wapbl_wc_header *wc;
381 size_t len = 1<<wl->wl_log_dev_bshift;
382 wc = wapbl_calloc(1, len);
383 wc->wc_type = WAPBL_WC_HEADER;
384 wc->wc_len = len;
385 wc->wc_circ_off = wl->wl_circ_off;
386 wc->wc_circ_size = wl->wl_circ_size;
387 /* XXX wc->wc_fsid */
388 wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
389 wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
390 wl->wl_wc_header = wc;
391 wl->wl_wc_scratch = wapbl_malloc(len);
392 }
393
394 /*
395 * if there was an existing set of unlinked but
396 * allocated inodes, preserve it in the new
397 * log.
398 */
399 if (wr && wr->wr_inodescnt) {
400 int i;
401
402 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
403 ("wapbl_start: reusing log with %d inodes\n",
404 wr->wr_inodescnt));
405
406 /*
407 		 * It's only valid to reuse the replay log if it's
408 * the same as the new log we just opened.
409 */
410 KDASSERT(!wapbl_replay_isopen(wr));
411 KASSERT(devvp->v_rdev == wr->wr_devvp->v_rdev);
412 KASSERT(logpbn == wr->wr_logpbn);
413 KASSERT(wl->wl_circ_size == wr->wr_wc_header.wc_circ_size);
414 KASSERT(wl->wl_circ_off == wr->wr_wc_header.wc_circ_off);
415 KASSERT(wl->wl_log_dev_bshift ==
416 wr->wr_wc_header.wc_log_dev_bshift);
417 KASSERT(wl->wl_fs_dev_bshift ==
418 wr->wr_wc_header.wc_fs_dev_bshift);
419
420 wl->wl_wc_header->wc_generation =
421 wr->wr_wc_header.wc_generation + 1;
422
423 for (i = 0; i < wr->wr_inodescnt; i++)
424 wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
425 wr->wr_inodes[i].wr_imode);
426
427 /* Make sure new transaction won't overwrite old inodes list */
428 KDASSERT(wapbl_transaction_len(wl) <=
429 wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
430 wr->wr_inodestail));
431
432 wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
433 wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
434 wapbl_transaction_len(wl);
435
436 error = wapbl_write_inodes(wl, &wl->wl_head);
437 if (error)
438 goto errout;
439
440 KASSERT(wl->wl_head != wl->wl_tail);
441 KASSERT(wl->wl_head != 0);
442 }
443
444 error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
445 if (error) {
446 goto errout;
447 }
448
449 *wlp = wl;
450 #if defined(WAPBL_DEBUG)
451 wapbl_debug_wl = wl;
452 #endif
453
454 return 0;
455 errout:
456 wapbl_discard(wl);
457 wapbl_free(wl->wl_wc_scratch);
458 wapbl_free(wl->wl_wc_header);
459 #if WAPBL_UVM_ALLOC
460 uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
461 	    round_page(sizeof(*wl->wl_deallocblks) *
462 		wl->wl_dealloclim));
463 uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
464 	    round_page(sizeof(*wl->wl_dealloclens) *
465 		wl->wl_dealloclim));
466 #else
467 wapbl_free(wl->wl_deallocblks);
468 wapbl_free(wl->wl_dealloclens);
469 #endif
470 wapbl_inodetrk_free(wl);
471 wapbl_free(wl);
472
473 return error;
474 }
475
476 /*
477 * Like wapbl_flush, only discards the transaction
478 * completely
479 */
480
481 void
482 wapbl_discard(struct wapbl *wl)
483 {
484 struct wapbl_entry *we;
485 struct buf *bp;
486 int i;
487
488 /*
489 * XXX we may consider using upgrade here
490 * if we want to call flush from inside a transaction
491 */
492 rw_enter(&wl->wl_rwlock, RW_WRITER);
493 wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
494 wl->wl_dealloccnt);
495
496 #ifdef WAPBL_DEBUG_PRINT
497 {
498 struct wapbl_entry *we;
499 pid_t pid = -1;
500 lwpid_t lid = -1;
501 if (curproc)
502 pid = curproc->p_pid;
503 if (curlwp)
504 lid = curlwp->l_lid;
505 #ifdef WAPBL_DEBUG_BUFBYTES
506 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
507 ("wapbl_discard: thread %d.%d discarding "
508 "transaction\n"
509 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
510 "deallocs=%d inodes=%d\n"
511 "\terrcnt = %u, reclaimable=%zu reserved=%zu "
512 "unsynced=%zu\n",
513 pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
514 wl->wl_bcount, wl->wl_dealloccnt,
515 wl->wl_inohashcnt, wl->wl_error_count,
516 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
517 wl->wl_unsynced_bufbytes));
518 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
519 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
520 ("\tentry: bufcount = %zu, reclaimable = %zu, "
521 "error = %d, unsynced = %zu\n",
522 we->we_bufcount, we->we_reclaimable_bytes,
523 we->we_error, we->we_unsynced_bufbytes));
524 }
525 #else /* !WAPBL_DEBUG_BUFBYTES */
526 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
527 ("wapbl_discard: thread %d.%d discarding transaction\n"
528 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
529 "deallocs=%d inodes=%d\n"
530 "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
531 pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
532 wl->wl_bcount, wl->wl_dealloccnt,
533 wl->wl_inohashcnt, wl->wl_error_count,
534 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
535 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
536 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
537 ("\tentry: bufcount = %zu, reclaimable = %zu, "
538 "error = %d\n",
539 we->we_bufcount, we->we_reclaimable_bytes,
540 we->we_error));
541 }
542 #endif /* !WAPBL_DEBUG_BUFBYTES */
543 }
544 #endif /* WAPBL_DEBUG_PRINT */
545
546 for (i = 0; i <= wl->wl_inohashmask; i++) {
547 struct wapbl_ino_head *wih;
548 struct wapbl_ino *wi;
549
550 wih = &wl->wl_inohash[i];
551 while ((wi = LIST_FIRST(wih)) != NULL) {
552 LIST_REMOVE(wi, wi_hash);
553 pool_put(&wapbl_ino_pool, wi);
554 KASSERT(wl->wl_inohashcnt > 0);
555 wl->wl_inohashcnt--;
556 }
557 }
558
559 /*
560 * clean buffer list
561 */
562 mutex_enter(&bufcache_lock);
563 mutex_enter(&wl->wl_mtx);
564 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
565 if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
566 /*
567 * The buffer will be unlocked and
568 * removed from the transaction in brelse
569 */
570 mutex_exit(&wl->wl_mtx);
571 brelsel(bp, 0);
572 mutex_enter(&wl->wl_mtx);
573 }
574 }
575 mutex_exit(&wl->wl_mtx);
576 mutex_exit(&bufcache_lock);
577
578 /*
579 * Remove references to this wl from wl_entries, free any which
580 * no longer have buffers, others will be freed in wapbl_biodone
581 * when they no longer have any buffers.
582 */
583 while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
584 SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
585 /* XXX should we be accumulating wl_error_count
586 * and increasing reclaimable bytes ? */
587 we->we_wapbl = NULL;
588 if (we->we_bufcount == 0) {
589 #ifdef WAPBL_DEBUG_BUFBYTES
590 KASSERT(we->we_unsynced_bufbytes == 0);
591 #endif
592 wapbl_free(we);
593 }
594 }
595
596 /* Discard list of deallocs */
597 wl->wl_dealloccnt = 0;
598 /* XXX should we clear wl_reserved_bytes? */
599
600 KASSERT(wl->wl_bufbytes == 0);
601 KASSERT(wl->wl_bcount == 0);
602 KASSERT(wl->wl_bufcount == 0);
603 KASSERT(LIST_EMPTY(&wl->wl_bufs));
604 KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
605 KASSERT(wl->wl_inohashcnt == 0);
606
607 rw_exit(&wl->wl_rwlock);
608 }
609
610 int
611 wapbl_stop(struct wapbl *wl, int force)
612 {
613 struct vnode *vp;
614 int error;
615
616 WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
617 error = wapbl_flush(wl, 1);
618 if (error) {
619 if (force)
620 wapbl_discard(wl);
621 else
622 return error;
623 }
624
625 /* Unlinked inodes persist after a flush */
626 if (wl->wl_inohashcnt) {
627 if (force) {
628 wapbl_discard(wl);
629 } else {
630 return EBUSY;
631 }
632 }
633
634 KASSERT(wl->wl_bufbytes == 0);
635 KASSERT(wl->wl_bcount == 0);
636 KASSERT(wl->wl_bufcount == 0);
637 KASSERT(LIST_EMPTY(&wl->wl_bufs));
638 KASSERT(wl->wl_dealloccnt == 0);
639 KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
640 KASSERT(wl->wl_inohashcnt == 0);
641
642 vp = wl->wl_logvp;
643
644 wapbl_free(wl->wl_wc_scratch);
645 wapbl_free(wl->wl_wc_header);
646 #if WAPBL_UVM_ALLOC
647 uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
648 	    round_page(sizeof(*wl->wl_deallocblks) *
649 		wl->wl_dealloclim));
650 uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
651 	    round_page(sizeof(*wl->wl_dealloclens) *
652 		wl->wl_dealloclim));
653 #else
654 wapbl_free(wl->wl_deallocblks);
655 wapbl_free(wl->wl_dealloclens);
656 #endif
657 wapbl_inodetrk_free(wl);
658
659 cv_destroy(&wl->wl_reclaimable_cv);
660 mutex_destroy(&wl->wl_mtx);
661 rw_destroy(&wl->wl_rwlock);
662 wapbl_free(wl);
663
664 return 0;
665 }
666
667 static int
668 wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
669 {
670 struct pstats *pstats = curlwp->l_proc->p_stats;
671 struct buf *bp;
672 int error;
673
674 KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
675 KASSERT(devvp->v_type == VBLK);
676
677 if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
678 devvp->v_numoutput++;
679 pstats->p_ru.ru_oublock++;
680 } else {
681 pstats->p_ru.ru_inblock++;
682 }
683
684 bp = getiobuf(devvp, true);
685 bp->b_flags = flags;
686 bp->b_cflags = BC_BUSY; /* silly & dubious */
687 bp->b_dev = devvp->v_rdev;
688 bp->b_data = data;
689 bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
690 bp->b_blkno = pbn;
691
692 WAPBL_PRINTF(WAPBL_PRINT_IO,
693 ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
694 BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
695 bp->b_blkno, bp->b_dev));
696
697 VOP_STRATEGY(devvp, bp);
698
699 error = biowait(bp);
700 putiobuf(bp);
701
702 if (error) {
703 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
704 ("wapbl_doio: %s %zu bytes at block %" PRId64
705 " on dev 0x%x failed with error %d\n",
706 (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
707 "write" : "read"),
708 len, pbn, devvp->v_rdev, error));
709 }
710
711 return error;
712 }
713
714 int
715 wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
716 {
717
718 return wapbl_doio(data, len, devvp, pbn, B_WRITE);
719 }
720
721 int
722 wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
723 {
724
725 return wapbl_doio(data, len, devvp, pbn, B_READ);
726 }
727
728 /*
729  * off is the byte offset into the log; returns the new offset for the
730  * next write and handles log wraparound.
731 */
732 static int
733 wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
734 {
735 size_t slen;
736 off_t off = *offp;
737 int error;
738
739 KDASSERT(((len >> wl->wl_log_dev_bshift) <<
740 wl->wl_log_dev_bshift) == len);
741
742 if (off < wl->wl_circ_off)
743 off = wl->wl_circ_off;
744 slen = wl->wl_circ_off + wl->wl_circ_size - off;
745 if (slen < len) {
746 error = wapbl_write(data, slen, wl->wl_devvp,
747 wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
748 if (error)
749 return error;
750 data = (uint8_t *)data + slen;
751 len -= slen;
752 off = wl->wl_circ_off;
753 }
754 error = wapbl_write(data, len, wl->wl_devvp,
755 wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
756 if (error)
757 return error;
758 off += len;
759 if (off >= wl->wl_circ_off + wl->wl_circ_size)
760 off = wl->wl_circ_off;
761 *offp = off;
762 return 0;
763 }
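/*
 * Example (illustrative values): with wl_circ_off = 1024 and
 * wl_circ_size = 8192, the data area covers byte offsets 1024..9215.
 * A 2048-byte write starting at off = 8704 is split into 512 bytes at
 * offset 8704 followed by 1536 bytes at offset 1024, and *offp is
 * returned as 2560.
 */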
764
765 /****************************************************************/
766
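/*
 * Begin a transaction.  Typical caller usage (sketch only; file systems
 * normally reach this through their own wrapper macros):
 *
 *	error = wapbl_begin(wl, __FILE__, __LINE__);
 *	if (error)
 *		return error;
 *	...dirty metadata buffers and register them with wapbl_add_buf()...
 *	wapbl_end(wl);
 */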
767 int
768 wapbl_begin(struct wapbl *wl, const char *file, int line)
769 {
770 int doflush;
771 unsigned lockcount;
772 krw_t op;
773
774 KDASSERT(wl);
775
776 #ifdef WAPBL_DEBUG_SERIALIZE
777 op = RW_WRITER;
778 #else
779 op = RW_READER;
780 #endif
781
782 /*
783 * XXX this needs to be made much more sophisticated.
784 * perhaps each wapbl_begin could reserve a specified
785 * number of buffers and bytes.
786 */
787 mutex_enter(&wl->wl_mtx);
788 lockcount = wl->wl_lock_count;
789 doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
790 wl->wl_bufbytes_max / 2) ||
791 ((wl->wl_bufcount + (lockcount * 10)) >
792 wl->wl_bufcount_max / 2) ||
793 (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
794 mutex_exit(&wl->wl_mtx);
795
796 if (doflush) {
797 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
798 ("force flush lockcnt=%d bufbytes=%zu "
799 "(max=%zu) bufcount=%zu (max=%zu)\n",
800 lockcount, wl->wl_bufbytes,
801 wl->wl_bufbytes_max, wl->wl_bufcount,
802 wl->wl_bufcount_max));
803 }
804
805 if (doflush) {
806 int error = wapbl_flush(wl, 0);
807 if (error)
808 return error;
809 }
810
811 rw_enter(&wl->wl_rwlock, op);
812 mutex_enter(&wl->wl_mtx);
813 wl->wl_lock_count++;
814 mutex_exit(&wl->wl_mtx);
815
816 #if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
817 WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
818 ("wapbl_begin thread %d.%d with bufcount=%zu "
819 "bufbytes=%zu bcount=%zu at %s:%d\n",
820 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
821 wl->wl_bufbytes, wl->wl_bcount, file, line));
822 #endif
823
824 return 0;
825 }
826
827 void
828 wapbl_end(struct wapbl *wl)
829 {
830
831 #if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
832 WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
833 ("wapbl_end thread %d.%d with bufcount=%zu "
834 "bufbytes=%zu bcount=%zu\n",
835 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
836 wl->wl_bufbytes, wl->wl_bcount));
837 #endif
838
839 mutex_enter(&wl->wl_mtx);
840 KASSERT(wl->wl_lock_count > 0);
841 wl->wl_lock_count--;
842 mutex_exit(&wl->wl_mtx);
843
844 rw_exit(&wl->wl_rwlock);
845 }
846
847 void
848 wapbl_add_buf(struct wapbl *wl, struct buf * bp)
849 {
850
851 KASSERT(bp->b_cflags & BC_BUSY);
852 KASSERT(bp->b_vp);
853
854 wapbl_jlock_assert(wl);
855
856 #if 0
857 /*
858 * XXX this might be an issue for swapfiles.
859 * see uvm_swap.c:1702
860 *
861 * XXX2 why require it then? leap of semantics?
862 */
863 KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
864 #endif
865
866 mutex_enter(&wl->wl_mtx);
867 if (bp->b_flags & B_LOCKED) {
868 LIST_REMOVE(bp, b_wapbllist);
869 WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
870 ("wapbl_add_buf thread %d.%d re-adding buf %p "
871 "with %d bytes %d bcount\n",
872 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
873 bp->b_bcount));
874 } else {
875 		/* unlocked but dirty buffers shouldn't exist */
876 KASSERT(!(bp->b_oflags & BO_DELWRI));
877 wl->wl_bufbytes += bp->b_bufsize;
878 wl->wl_bcount += bp->b_bcount;
879 wl->wl_bufcount++;
880 WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
881 ("wapbl_add_buf thread %d.%d adding buf %p "
882 "with %d bytes %d bcount\n",
883 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
884 bp->b_bcount));
885 }
886 LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
887 mutex_exit(&wl->wl_mtx);
888
889 bp->b_flags |= B_LOCKED;
890 }
891
892 static void
893 wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
894 {
895
896 KASSERT(mutex_owned(&wl->wl_mtx));
897 KASSERT(bp->b_cflags & BC_BUSY);
898 wapbl_jlock_assert(wl);
899
900 #if 0
901 /*
902 * XXX this might be an issue for swapfiles.
903 * see uvm_swap.c:1725
904 *
905 * XXXdeux: see above
906 */
907 KASSERT((bp->b_flags & BC_NOCACHE) == 0);
908 #endif
909 KASSERT(bp->b_flags & B_LOCKED);
910
911 WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
912 ("wapbl_remove_buf thread %d.%d removing buf %p with "
913 "%d bytes %d bcount\n",
914 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));
915
916 KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
917 wl->wl_bufbytes -= bp->b_bufsize;
918 KASSERT(wl->wl_bcount >= bp->b_bcount);
919 wl->wl_bcount -= bp->b_bcount;
920 KASSERT(wl->wl_bufcount > 0);
921 wl->wl_bufcount--;
922 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
923 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
924 LIST_REMOVE(bp, b_wapbllist);
925
926 bp->b_flags &= ~B_LOCKED;
927 }
928
929 /* called from brelsel() in vfs_bio among other places */
930 void
931 wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
932 {
933
934 mutex_enter(&wl->wl_mtx);
935 wapbl_remove_buf_locked(wl, bp);
936 mutex_exit(&wl->wl_mtx);
937 }
938
939 void
940 wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
941 {
942
943 KASSERT(bp->b_cflags & BC_BUSY);
944
945 /*
946 * XXX: why does this depend on B_LOCKED? otherwise the buf
947 * is not for a transaction? if so, why is this called in the
948 * first place?
949 */
950 if (bp->b_flags & B_LOCKED) {
951 mutex_enter(&wl->wl_mtx);
952 wl->wl_bufbytes += bp->b_bufsize - oldsz;
953 wl->wl_bcount += bp->b_bcount - oldcnt;
954 mutex_exit(&wl->wl_mtx);
955 }
956 }
957
958 #endif /* _KERNEL */
959
960 /****************************************************************/
961 /* Some utility inlines */
962
963 /* This is used to advance the pointer at old to new value at old+delta */
964 static __inline off_t
965 wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
966 {
967 off_t new;
968
969 /* Define acceptable ranges for inputs. */
970 KASSERT(delta <= size);
971 KASSERT((old == 0) || (old >= off));
972 KASSERT(old < (size + off));
973
974 if ((old == 0) && (delta != 0))
975 new = off + delta;
976 else if ((old + delta) < (size + off))
977 new = old + delta;
978 else
979 new = (old + delta) - size;
980
981 /* Note some interesting axioms */
982 KASSERT((delta != 0) || (new == old));
983 KASSERT((delta == 0) || (new != 0));
984 KASSERT((delta != (size)) || (new == old));
985
986 /* Define acceptable ranges for output. */
987 KASSERT((new == 0) || (new >= off));
988 KASSERT(new < (size + off));
989 return new;
990 }
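/*
 * For example (illustrative values): with size = 100 and off = 10,
 * advancing old = 0 (empty log) by delta = 5 yields off + delta = 15,
 * while advancing old = 105 by delta = 10 wraps to (105 + 10) - 100 = 15.
 */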
991
992 static __inline size_t
993 wapbl_space_used(size_t avail, off_t head, off_t tail)
994 {
995
996 if (tail == 0) {
997 KASSERT(head == 0);
998 return 0;
999 }
1000 return ((head + (avail - 1) - tail) % avail) + 1;
1001 }
1002
1003 static __inline size_t
1004 wapbl_space_free(size_t avail, off_t head, off_t tail)
1005 {
1006
1007 return avail - wapbl_space_used(avail, head, tail);
1008 }
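/*
 * For example (illustrative values): with avail = 100, head = 20 and
 * tail = 80, wapbl_space_used() returns ((20 + 99 - 80) % 100) + 1 = 40
 * and wapbl_space_free() returns 60.  An empty log (head == tail == 0)
 * uses nothing; a full log (head == tail != 0) uses all of avail.
 */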
1009
1010 static __inline void
1011 wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
1012 off_t *tailp)
1013 {
1014 off_t head = *headp;
1015 off_t tail = *tailp;
1016
1017 KASSERT(delta <= wapbl_space_free(size, head, tail));
1018 head = wapbl_advance(size, off, head, delta);
1019 if ((tail == 0) && (head != 0))
1020 tail = off;
1021 *headp = head;
1022 *tailp = tail;
1023 }
1024
1025 static __inline void
1026 wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
1027 off_t *tailp)
1028 {
1029 off_t head = *headp;
1030 off_t tail = *tailp;
1031
1032 KASSERT(delta <= wapbl_space_used(size, head, tail));
1033 tail = wapbl_advance(size, off, tail, delta);
1034 if (head == tail) {
1035 head = tail = 0;
1036 }
1037 *headp = head;
1038 *tailp = tail;
1039 }
1040
1041 #ifdef _KERNEL
1042
1043 /****************************************************************/
1044
1045 /*
1046 * Remove transactions whose buffers are completely flushed to disk.
1047 * Will block until at least minfree space is available.
1048 * only intended to be called from inside wapbl_flush and therefore
1049 * does not protect against commit races with itself or with flush.
1050 */
1051 static int
1052 wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
1053 {
1054 size_t delta;
1055 size_t avail;
1056 off_t head;
1057 off_t tail;
1058 int error = 0;
1059
1060 KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
1061 KASSERT(rw_write_held(&wl->wl_rwlock));
1062
1063 mutex_enter(&wl->wl_mtx);
1064
1065 /*
1066 * First check to see if we have to do a commit
1067 * at all.
1068 */
1069 avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
1070 if (minfree < avail) {
1071 mutex_exit(&wl->wl_mtx);
1072 return 0;
1073 }
1074 minfree -= avail;
1075 while ((wl->wl_error_count == 0) &&
1076 (wl->wl_reclaimable_bytes < minfree)) {
1077 WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1078 ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
1079 "minfree=%zd\n",
1080 &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
1081 minfree));
1082
1083 cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
1084 }
1085 if (wl->wl_reclaimable_bytes < minfree) {
1086 KASSERT(wl->wl_error_count);
1087 /* XXX maybe get actual error from buffer instead someday? */
1088 error = EIO;
1089 }
1090 head = wl->wl_head;
1091 tail = wl->wl_tail;
1092 delta = wl->wl_reclaimable_bytes;
1093
1094 	/* If all of the entries are flushed, then be sure to keep
1095 * the reserved bytes reserved. Watch out for discarded transactions,
1096 * which could leave more bytes reserved than are reclaimable.
1097 */
1098 if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
1099 (delta >= wl->wl_reserved_bytes)) {
1100 delta -= wl->wl_reserved_bytes;
1101 }
1102 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
1103 &tail);
1104 KDASSERT(wl->wl_reserved_bytes <=
1105 wapbl_space_used(wl->wl_circ_size, head, tail));
1106 mutex_exit(&wl->wl_mtx);
1107
1108 if (error)
1109 return error;
1110
1111 if (waitonly)
1112 return 0;
1113
1114 /*
1115 * This is where head, tail and delta are unprotected
1116 * from races against itself or flush. This is ok since
1117 * we only call this routine from inside flush itself.
1118 *
1119 * XXX: how can it race against itself when accessed only
1120 * from behind the write-locked rwlock?
1121 */
1122 error = wapbl_write_commit(wl, head, tail);
1123 if (error)
1124 return error;
1125
1126 wl->wl_head = head;
1127 wl->wl_tail = tail;
1128
1129 mutex_enter(&wl->wl_mtx);
1130 KASSERT(wl->wl_reclaimable_bytes >= delta);
1131 wl->wl_reclaimable_bytes -= delta;
1132 mutex_exit(&wl->wl_mtx);
1133 WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1134 ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
1135 curproc->p_pid, curlwp->l_lid, delta));
1136
1137 return 0;
1138 }
1139
1140 /****************************************************************/
1141
1142 void
1143 wapbl_biodone(struct buf *bp)
1144 {
1145 struct wapbl_entry *we = bp->b_private;
1146 struct wapbl *wl = we->we_wapbl;
1147
1148 /*
1149 * Handle possible flushing of buffers after log has been
1150 	 * decommissioned.
1151 */
1152 if (!wl) {
1153 KASSERT(we->we_bufcount > 0);
1154 we->we_bufcount--;
1155 #ifdef WAPBL_DEBUG_BUFBYTES
1156 KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
1157 we->we_unsynced_bufbytes -= bp->b_bufsize;
1158 #endif
1159
1160 if (we->we_bufcount == 0) {
1161 #ifdef WAPBL_DEBUG_BUFBYTES
1162 KASSERT(we->we_unsynced_bufbytes == 0);
1163 #endif
1164 wapbl_free(we);
1165 }
1166
1167 brelse(bp, 0);
1168 return;
1169 }
1170
1171 #ifdef ohbother
1172 KDASSERT(bp->b_flags & B_DONE);
1173 KDASSERT(!(bp->b_flags & B_DELWRI));
1174 KDASSERT(bp->b_flags & B_ASYNC);
1175 KDASSERT(bp->b_flags & B_BUSY);
1176 KDASSERT(!(bp->b_flags & B_LOCKED));
1177 KDASSERT(!(bp->b_flags & B_READ));
1178 KDASSERT(!(bp->b_flags & B_INVAL));
1179 KDASSERT(!(bp->b_flags & B_NOCACHE));
1180 #endif
1181
1182 if (bp->b_error) {
1183 #ifdef notyet /* Can't currently handle possible dirty buffer reuse */
1184 XXXpooka: interfaces not fully updated
1185 Note: this was not enabled in the original patch
1186 against netbsd4 either. I don't know if comment
1187 above is true or not.
1188
1189 /*
1190 * If an error occurs, report the error and leave the
1191 * buffer as a delayed write on the LRU queue.
1192 * restarting the write would likely result in
1193 * an error spinloop, so let it be done harmlessly
1194 * by the syncer.
1195 */
1196 bp->b_flags &= ~(B_DONE);
1197 simple_unlock(&bp->b_interlock);
1198
1199 if (we->we_error == 0) {
1200 mutex_enter(&wl->wl_mtx);
1201 wl->wl_error_count++;
1202 mutex_exit(&wl->wl_mtx);
1203 cv_broadcast(&wl->wl_reclaimable_cv);
1204 }
1205 we->we_error = bp->b_error;
1206 bp->b_error = 0;
1207 brelse(bp);
1208 return;
1209 #else
1210 /* For now, just mark the log permanently errored out */
1211
1212 mutex_enter(&wl->wl_mtx);
1213 if (wl->wl_error_count == 0) {
1214 wl->wl_error_count++;
1215 cv_broadcast(&wl->wl_reclaimable_cv);
1216 }
1217 mutex_exit(&wl->wl_mtx);
1218 #endif
1219 }
1220
1221 mutex_enter(&wl->wl_mtx);
1222
1223 KASSERT(we->we_bufcount > 0);
1224 we->we_bufcount--;
1225 #ifdef WAPBL_DEBUG_BUFBYTES
1226 KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
1227 we->we_unsynced_bufbytes -= bp->b_bufsize;
1228 KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
1229 wl->wl_unsynced_bufbytes -= bp->b_bufsize;
1230 #endif
1231
1232 /*
1233 * If the current transaction can be reclaimed, start
1234 * at the beginning and reclaim any consecutive reclaimable
1235 * transactions. If we successfully reclaim anything,
1236 * then wakeup anyone waiting for the reclaim.
1237 */
1238 if (we->we_bufcount == 0) {
1239 size_t delta = 0;
1240 int errcnt = 0;
1241 #ifdef WAPBL_DEBUG_BUFBYTES
1242 KDASSERT(we->we_unsynced_bufbytes == 0);
1243 #endif
1244 /*
1245 * clear any posted error, since the buffer it came from
1246 		 * has been successfully flushed by now
1247 */
1248 while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
1249 (we->we_bufcount == 0)) {
1250 delta += we->we_reclaimable_bytes;
1251 if (we->we_error)
1252 errcnt++;
1253 SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
1254 wapbl_free(we);
1255 }
1256
1257 if (delta) {
1258 wl->wl_reclaimable_bytes += delta;
1259 KASSERT(wl->wl_error_count >= errcnt);
1260 wl->wl_error_count -= errcnt;
1261 cv_broadcast(&wl->wl_reclaimable_cv);
1262 }
1263 }
1264
1265 mutex_exit(&wl->wl_mtx);
1266 brelse(bp, 0);
1267 }
1268
1269 /*
1270 * Write transactions to disk + start I/O for contents
1271 */
1272 int
1273 wapbl_flush(struct wapbl *wl, int waitfor)
1274 {
1275 struct buf *bp;
1276 struct wapbl_entry *we;
1277 off_t off;
1278 off_t head;
1279 off_t tail;
1280 size_t delta = 0;
1281 size_t flushsize;
1282 size_t reserved;
1283 int error = 0;
1284
1285 /*
1286 * Do a quick check to see if a full flush can be skipped
1287 * This assumes that the flush callback does not need to be called
1288 * unless there are other outstanding bufs.
1289 */
1290 if (!waitfor) {
1291 size_t nbufs;
1292 mutex_enter(&wl->wl_mtx); /* XXX need mutex here to
1293 protect the KASSERTS */
1294 nbufs = wl->wl_bufcount;
1295 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1296 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1297 mutex_exit(&wl->wl_mtx);
1298 if (nbufs == 0)
1299 return 0;
1300 }
1301
1302 /*
1303 * XXX we may consider using LK_UPGRADE here
1304 * if we want to call flush from inside a transaction
1305 */
1306 rw_enter(&wl->wl_rwlock, RW_WRITER);
1307 wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
1308 wl->wl_dealloccnt);
1309
1310 /*
1311 * Now that we are fully locked and flushed,
1312 * do another check for nothing to do.
1313 */
1314 if (wl->wl_bufcount == 0) {
1315 goto out;
1316 }
1317
1318 #if 0
1319 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1320 ("wapbl_flush thread %d.%d flushing entries with "
1321 "bufcount=%zu bufbytes=%zu\n",
1322 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1323 wl->wl_bufbytes));
1324 #endif
1325
1326 /* Calculate amount of space needed to flush */
1327 flushsize = wapbl_transaction_len(wl);
1328
1329 if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1330 /*
1331 * XXX this could be handled more gracefully, perhaps place
1332 * only a partial transaction in the log and allow the
1333 * remaining to flush without the protection of the journal.
1334 */
1335 panic("wapbl_flush: current transaction too big to flush\n");
1336 }
1337
1338 error = wapbl_truncate(wl, flushsize, 0);
1339 if (error)
1340 goto out2;
1341
1342 off = wl->wl_head;
1343 KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
1344 (off < wl->wl_circ_off + wl->wl_circ_size)));
1345 error = wapbl_write_blocks(wl, &off);
1346 if (error)
1347 goto out2;
1348 error = wapbl_write_revocations(wl, &off);
1349 if (error)
1350 goto out2;
1351 error = wapbl_write_inodes(wl, &off);
1352 if (error)
1353 goto out2;
1354
1355 reserved = 0;
1356 if (wl->wl_inohashcnt)
1357 reserved = wapbl_transaction_inodes_len(wl);
1358
1359 head = wl->wl_head;
1360 tail = wl->wl_tail;
1361
1362 wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1363 &head, &tail);
1364 #ifdef WAPBL_DEBUG
1365 if (head != off) {
1366 panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1367 " off=%"PRIdMAX" flush=%zu\n",
1368 (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1369 flushsize);
1370 }
1371 #else
1372 KASSERT(head == off);
1373 #endif
1374
1375 /* Opportunistically move the tail forward if we can */
1376 if (!wapbl_lazy_truncate) {
1377 mutex_enter(&wl->wl_mtx);
1378 delta = wl->wl_reclaimable_bytes;
1379 mutex_exit(&wl->wl_mtx);
1380 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1381 &head, &tail);
1382 }
1383
1384 error = wapbl_write_commit(wl, head, tail);
1385 if (error)
1386 goto out2;
1387
1388 /* poolme? or kmemme? */
1389 we = wapbl_calloc(1, sizeof(*we));
1390
1391 #ifdef WAPBL_DEBUG_BUFBYTES
1392 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1393 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1394 " unsynced=%zu"
1395 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1396 "inodes=%d\n",
1397 curproc->p_pid, curlwp->l_lid, flushsize, delta,
1398 wapbl_space_used(wl->wl_circ_size, head, tail),
1399 wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1400 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1401 wl->wl_inohashcnt));
1402 #else
1403 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1404 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1405 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1406 "inodes=%d\n",
1407 curproc->p_pid, curlwp->l_lid, flushsize, delta,
1408 wapbl_space_used(wl->wl_circ_size, head, tail),
1409 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1410 wl->wl_dealloccnt, wl->wl_inohashcnt));
1411 #endif
1412
1413
1414 mutex_enter(&bufcache_lock);
1415 mutex_enter(&wl->wl_mtx);
1416
1417 wl->wl_reserved_bytes = reserved;
1418 wl->wl_head = head;
1419 wl->wl_tail = tail;
1420 KASSERT(wl->wl_reclaimable_bytes >= delta);
1421 wl->wl_reclaimable_bytes -= delta;
1422 wl->wl_dealloccnt = 0;
1423 #ifdef WAPBL_DEBUG_BUFBYTES
1424 wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1425 #endif
1426
1427 we->we_wapbl = wl;
1428 we->we_bufcount = wl->wl_bufcount;
1429 #ifdef WAPBL_DEBUG_BUFBYTES
1430 we->we_unsynced_bufbytes = wl->wl_bufbytes;
1431 #endif
1432 we->we_reclaimable_bytes = flushsize;
1433 we->we_error = 0;
1434 SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1435
1436 /*
1437 	 * this flushes bufs in the reverse order from which they were queued;
1438 * it shouldn't matter, but if we care we could use TAILQ instead.
1439 * XXX Note they will get put on the lru queue when they flush
1440 * so we might actually want to change this to preserve order.
1441 */
1442 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
1443 if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1444 continue;
1445 }
1446 bp->b_iodone = wapbl_biodone;
1447 bp->b_private = we;
1448 bremfree(bp);
1449 wapbl_remove_buf_locked(wl, bp);
1450 mutex_exit(&wl->wl_mtx);
1451 mutex_exit(&bufcache_lock);
1452 bawrite(bp);
1453 mutex_enter(&bufcache_lock);
1454 mutex_enter(&wl->wl_mtx);
1455 }
1456 mutex_exit(&wl->wl_mtx);
1457 mutex_exit(&bufcache_lock);
1458
1459 #if 0
1460 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1461 ("wapbl_flush thread %d.%d done flushing entries...\n",
1462 curproc->p_pid, curlwp->l_lid));
1463 #endif
1464
1465 out:
1466
1467 /*
1468 * If the waitfor flag is set, don't return until everything is
1469 * fully flushed and the on disk log is empty.
1470 */
1471 if (waitfor) {
1472 error = wapbl_truncate(wl, wl->wl_circ_size -
1473 wl->wl_reserved_bytes, wapbl_lazy_truncate);
1474 }
1475
1476 out2:
1477 if (error) {
1478 wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
1479 wl->wl_dealloclens, wl->wl_dealloccnt);
1480 }
1481
1482 #ifdef WAPBL_DEBUG_PRINT
1483 if (error) {
1484 pid_t pid = -1;
1485 lwpid_t lid = -1;
1486 if (curproc)
1487 pid = curproc->p_pid;
1488 if (curlwp)
1489 lid = curlwp->l_lid;
1490 mutex_enter(&wl->wl_mtx);
1491 #ifdef WAPBL_DEBUG_BUFBYTES
1492 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1493 ("wapbl_flush: thread %d.%d aborted flush: "
1494 "error = %d\n"
1495 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1496 "deallocs=%d inodes=%d\n"
1497 "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1498 "unsynced=%zu\n",
1499 pid, lid, error, wl->wl_bufcount,
1500 wl->wl_bufbytes, wl->wl_bcount,
1501 wl->wl_dealloccnt, wl->wl_inohashcnt,
1502 wl->wl_error_count, wl->wl_reclaimable_bytes,
1503 wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1504 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1505 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1506 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1507 "error = %d, unsynced = %zu\n",
1508 we->we_bufcount, we->we_reclaimable_bytes,
1509 we->we_error, we->we_unsynced_bufbytes));
1510 }
1511 #else
1512 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1513 ("wapbl_flush: thread %d.%d aborted flush: "
1514 "error = %d\n"
1515 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1516 "deallocs=%d inodes=%d\n"
1517 "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
1518 pid, lid, error, wl->wl_bufcount,
1519 wl->wl_bufbytes, wl->wl_bcount,
1520 wl->wl_dealloccnt, wl->wl_inohashcnt,
1521 wl->wl_error_count, wl->wl_reclaimable_bytes,
1522 wl->wl_reserved_bytes));
1523 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1524 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1525 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1526 "error = %d\n", we->we_bufcount,
1527 we->we_reclaimable_bytes, we->we_error));
1528 }
1529 #endif
1530 mutex_exit(&wl->wl_mtx);
1531 }
1532 #endif
1533
1534 rw_exit(&wl->wl_rwlock);
1535 return error;
1536 }
1537
1538 /****************************************************************/
1539
1540 void
1541 wapbl_jlock_assert(struct wapbl *wl)
1542 {
1543
1544 #ifdef WAPBL_DEBUG_SERIALIZE
1545 KASSERT(rw_write_held(&wl->wl_rwlock));
1546 #else
1547 KASSERT(rw_read_held(&wl->wl_rwlock) || rw_write_held(&wl->wl_rwlock));
1548 #endif
1549 }
1550
1551 void
1552 wapbl_junlock_assert(struct wapbl *wl)
1553 {
1554
1555 #ifdef WAPBL_DEBUG_SERIALIZE
1556 KASSERT(!rw_write_held(&wl->wl_rwlock));
1557 #endif
1558 }
1559
1560 /****************************************************************/
1561
1562 /* locks missing */
1563 void
1564 wapbl_print(struct wapbl *wl,
1565 int full,
1566 void (*pr)(const char *, ...))
1567 {
1568 struct buf *bp;
1569 struct wapbl_entry *we;
1570 (*pr)("wapbl %p", wl);
1571 (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
1572 wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
1573 (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
1574 wl->wl_circ_size, wl->wl_circ_off,
1575 (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
1576 (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
1577 	    wl->wl_fs_dev_bshift, wl->wl_log_dev_bshift);
1578 #ifdef WAPBL_DEBUG_BUFBYTES
1579 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1580 "reserved = %zu errcnt = %d unsynced = %zu\n",
1581 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1582 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1583 wl->wl_error_count, wl->wl_unsynced_bufbytes);
1584 #else
1585 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1586 "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
1587 wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1588 wl->wl_error_count);
1589 #endif
1590 (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
1591 wl->wl_dealloccnt, wl->wl_dealloclim);
1592 (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
1593 wl->wl_inohashcnt, wl->wl_inohashmask);
1594 (*pr)("entries:\n");
1595 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1596 #ifdef WAPBL_DEBUG_BUFBYTES
1597 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
1598 "unsynced = %zu\n",
1599 we->we_bufcount, we->we_reclaimable_bytes,
1600 we->we_error, we->we_unsynced_bufbytes);
1601 #else
1602 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
1603 we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
1604 #endif
1605 }
1606 if (full) {
1607 int cnt = 0;
1608 (*pr)("bufs =");
1609 LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
1610 if (!LIST_NEXT(bp, b_wapbllist)) {
1611 (*pr)(" %p", bp);
1612 } else if ((++cnt % 6) == 0) {
1613 (*pr)(" %p,\n\t", bp);
1614 } else {
1615 (*pr)(" %p,", bp);
1616 }
1617 }
1618 (*pr)("\n");
1619
1620 (*pr)("dealloced blks = ");
1621 {
1622 int i;
1623 cnt = 0;
1624 for (i = 0; i < wl->wl_dealloccnt; i++) {
1625 (*pr)(" %"PRId64":%d,",
1626 wl->wl_deallocblks[i],
1627 wl->wl_dealloclens[i]);
1628 if ((++cnt % 4) == 0) {
1629 (*pr)("\n\t");
1630 }
1631 }
1632 }
1633 (*pr)("\n");
1634
1635 (*pr)("registered inodes = ");
1636 {
1637 int i;
1638 cnt = 0;
1639 for (i = 0; i <= wl->wl_inohashmask; i++) {
1640 struct wapbl_ino_head *wih;
1641 struct wapbl_ino *wi;
1642
1643 wih = &wl->wl_inohash[i];
1644 LIST_FOREACH(wi, wih, wi_hash) {
1645 if (wi->wi_ino == 0)
1646 continue;
1647 (*pr)(" %"PRId32"/0%06"PRIo32",",
1648 wi->wi_ino, wi->wi_mode);
1649 if ((++cnt % 4) == 0) {
1650 (*pr)("\n\t");
1651 }
1652 }
1653 }
1654 (*pr)("\n");
1655 }
1656 }
1657 }
1658
1659 #if defined(WAPBL_DEBUG) || defined(DDB)
1660 void
1661 wapbl_dump(struct wapbl *wl)
1662 {
1663 #if defined(WAPBL_DEBUG)
1664 if (!wl)
1665 wl = wapbl_debug_wl;
1666 #endif
1667 if (!wl)
1668 return;
1669 wapbl_print(wl, 1, printf);
1670 }
1671 #endif
1672
1673 /****************************************************************/
1674
1675 void
1676 wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
1677 {
1678
1679 wapbl_jlock_assert(wl);
1680
1681 /* XXX should eventually instead tie this into resource estimation */
1682 /* XXX this KASSERT needs locking/mutex analysis */
1683 KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
1684 wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
1685 wl->wl_dealloclens[wl->wl_dealloccnt] = len;
1686 wl->wl_dealloccnt++;
1687 WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
1688 ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
1689 }
1690
1691 /****************************************************************/
1692
1693 static void
1694 wapbl_inodetrk_init(struct wapbl *wl, u_int size)
1695 {
1696
1697 wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
1698 if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
1699 pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
1700 "wapblinopl", &pool_allocator_nointr, IPL_NONE);
1701 }
1702 }
1703
1704 static void
1705 wapbl_inodetrk_free(struct wapbl *wl)
1706 {
1707
1708 /* XXX this KASSERT needs locking/mutex analysis */
1709 KASSERT(wl->wl_inohashcnt == 0);
1710 hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
1711 if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
1712 pool_destroy(&wapbl_ino_pool);
1713 }
1714 }
1715
1716 static struct wapbl_ino *
1717 wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
1718 {
1719 struct wapbl_ino_head *wih;
1720 struct wapbl_ino *wi;
1721
1722 KASSERT(mutex_owned(&wl->wl_mtx));
1723
1724 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1725 LIST_FOREACH(wi, wih, wi_hash) {
1726 if (ino == wi->wi_ino)
1727 return wi;
1728 }
1729 return 0;
1730 }
1731
1732 void
1733 wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
1734 {
1735 struct wapbl_ino_head *wih;
1736 struct wapbl_ino *wi;
1737
1738 wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
1739
1740 mutex_enter(&wl->wl_mtx);
1741 if (wapbl_inodetrk_get(wl, ino) == NULL) {
1742 wi->wi_ino = ino;
1743 wi->wi_mode = mode;
1744 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1745 LIST_INSERT_HEAD(wih, wi, wi_hash);
1746 wl->wl_inohashcnt++;
1747 WAPBL_PRINTF(WAPBL_PRINT_INODE,
1748 ("wapbl_register_inode: ino=%"PRId64"\n", ino));
1749 mutex_exit(&wl->wl_mtx);
1750 } else {
1751 mutex_exit(&wl->wl_mtx);
1752 pool_put(&wapbl_ino_pool, wi);
1753 }
1754 }
1755
1756 void
1757 wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
1758 {
1759 struct wapbl_ino *wi;
1760
1761 mutex_enter(&wl->wl_mtx);
1762 wi = wapbl_inodetrk_get(wl, ino);
1763 if (wi) {
1764 WAPBL_PRINTF(WAPBL_PRINT_INODE,
1765 ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
1766 KASSERT(wl->wl_inohashcnt > 0);
1767 wl->wl_inohashcnt--;
1768 LIST_REMOVE(wi, wi_hash);
1769 mutex_exit(&wl->wl_mtx);
1770
1771 pool_put(&wapbl_ino_pool, wi);
1772 } else {
1773 mutex_exit(&wl->wl_mtx);
1774 }
1775 }
1776
1777 /****************************************************************/
1778
1779 static __inline size_t
1780 wapbl_transaction_inodes_len(struct wapbl *wl)
1781 {
1782 int blocklen = 1<<wl->wl_log_dev_bshift;
1783 int iph;
1784
1785 	/* Calculate number of inodes described in an inodelist header */
1786 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
1787 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
1788
1789 KASSERT(iph > 0);
1790
1791 return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
1792 }
1793
1794
1795 /* Calculate amount of space a transaction will take on disk */
1796 static size_t
1797 wapbl_transaction_len(struct wapbl *wl)
1798 {
1799 int blocklen = 1<<wl->wl_log_dev_bshift;
1800 size_t len;
1801 int bph;
1802
1803 /* Calculate number of blocks described in a blocklist header */
1804 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1805 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1806
1807 KASSERT(bph > 0);
1808
1809 len = wl->wl_bcount;
1810 len += howmany(wl->wl_bufcount, bph)*blocklen;
1811 len += howmany(wl->wl_dealloccnt, bph)*blocklen;
1812 len += wapbl_transaction_inodes_len(wl);
1813
1814 return len;
1815 }
1816
1817 /*
1818 * Perform commit operation
1819 *
1820 * Note that generation number incrementation needs to
1821 * be protected against racing with other invocations
1822 * of wapbl_commit. This is ok since this routine
1823 * is only invoked from wapbl_flush
1824 */
1825 static int
1826 wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
1827 {
1828 struct wapbl_wc_header *wc = wl->wl_wc_header;
1829 struct timespec ts;
1830 int error;
1831 int force = 1;
1832
1833 /* XXX Calc checksum here, instead we do this for now */
1834 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
1835 if (error) {
1836 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1837 ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
1838 "returned %d\n", wl->wl_devvp->v_rdev, error));
1839 }
1840
1841 wc->wc_head = head;
1842 wc->wc_tail = tail;
1843 wc->wc_checksum = 0;
1844 wc->wc_version = 1;
1845 getnanotime(&ts); /* XXX need higher resolution time here? */
1846 	wc->wc_time = ts.tv_sec;
1847 wc->wc_timensec = ts.tv_nsec;
1848
1849 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1850 ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n",
1851 (intmax_t)head, (intmax_t)tail));
1852
1853 /*
1854 * XXX if generation will rollover, then first zero
1855 * over second commit header before trying to write both headers.
1856 */
1857
1858 error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
1859 wl->wl_logpbn + wc->wc_generation % 2);
1860 if (error)
1861 return error;
1862
1863 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
1864 if (error) {
1865 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1866 ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
1867 "returned %d\n", wl->wl_devvp->v_rdev, error));
1868 }
1869
1870 /*
1871 * If the generation number was zero, write it out a second time.
1872 * This handles initialization and generation number rollover
1873 */
1874 if (wc->wc_generation++ == 0) {
1875 error = wapbl_write_commit(wl, head, tail);
1876 /*
1877 * This panic should be able to be removed if we do the
1878 * zero'ing mentioned above, and we are certain to roll
1879 * back generation number on failure.
1880 */
1881 if (error)
1882 panic("wapbl_write_commit: error writing duplicate "
1883 "log header: %d\n", error);
1884 }
1885 return 0;
1886 }
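/*
 * Note that successive commits alternate between the two reserved header
 * blocks (wl_logpbn + wc_generation % 2), so a crash in mid-commit leaves
 * the previous header intact; replay is then expected to use whichever
 * header carries the newer generation.
 */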
1887
1888 /* Returns new offset value */
1889 static int
1890 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
1891 {
1892 struct wapbl_wc_blocklist *wc =
1893 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1894 int blocklen = 1<<wl->wl_log_dev_bshift;
1895 int bph;
1896 struct buf *bp;
1897 off_t off = *offp;
1898 int error;
1899
1900 KASSERT(rw_write_held(&wl->wl_rwlock));
1901
1902 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1903 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1904
1905 bp = LIST_FIRST(&wl->wl_bufs);
1906
1907 while (bp) {
1908 int cnt;
1909 struct buf *obp = bp;
1910
1911 KASSERT(bp->b_flags & B_LOCKED);
1912
1913 wc->wc_type = WAPBL_WC_BLOCKS;
1914 wc->wc_len = blocklen;
1915 wc->wc_blkcount = 0;
1916 while (bp && (wc->wc_blkcount < bph)) {
1917 /*
1918 * Make sure all the physical block numbers are up to
1919 * date. If this is not always true on a given
1920 * filesystem, then VOP_BMAP must be called. We
1921 * could call VOP_BMAP here, or else in the filesystem
1922 * specific flush callback, although neither of those
1923 			 * solutions allows us to take the vnode lock. If a
1924 * filesystem requires that we must take the vnode lock
1925 * to call VOP_BMAP, then we can probably do it in
1926 * bwrite when the vnode lock should already be held
1927 * by the invoking code.
1928 */
1929 KASSERT((bp->b_vp->v_type == VBLK) ||
1930 (bp->b_blkno != bp->b_lblkno));
1931 KASSERT(bp->b_blkno > 0);
1932
1933 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
1934 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
1935 wc->wc_len += bp->b_bcount;
1936 wc->wc_blkcount++;
1937 bp = LIST_NEXT(bp, b_wapbllist);
1938 }
1939 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1940 ("wapbl_write_blocks: len = %u off = %"PRIdMAX"\n",
1941 wc->wc_len, (intmax_t)off));
1942
1943 error = wapbl_circ_write(wl, wc, blocklen, &off);
1944 if (error)
1945 return error;
1946 bp = obp;
1947 cnt = 0;
1948 while (bp && (cnt++ < bph)) {
1949 error = wapbl_circ_write(wl, bp->b_data,
1950 bp->b_bcount, &off);
1951 if (error)
1952 return error;
1953 bp = LIST_NEXT(bp, b_wapbllist);
1954 }
1955 }
1956 *offp = off;
1957 return 0;
1958 }
1959
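/*
 * Write the accumulated block deallocations as WAPBL_WC_REVOCATIONS
 * records; during replay these remove the corresponding blocks from
 * the replay hash so their old contents are not restored.
 * The new log offset is returned via offp.
 */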
1960 static int
1961 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
1962 {
1963 struct wapbl_wc_blocklist *wc =
1964 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1965 int i;
1966 int blocklen = 1<<wl->wl_log_dev_bshift;
1967 int bph;
1968 off_t off = *offp;
1969 int error;
1970
1971 if (wl->wl_dealloccnt == 0)
1972 return 0;
1973
1974 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1975 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1976
1977 i = 0;
1978 while (i < wl->wl_dealloccnt) {
1979 wc->wc_type = WAPBL_WC_REVOCATIONS;
1980 wc->wc_len = blocklen;
1981 wc->wc_blkcount = 0;
1982 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
1983 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
1984 wl->wl_deallocblks[i];
1985 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
1986 wl->wl_dealloclens[i];
1987 wc->wc_blkcount++;
1988 i++;
1989 }
1990 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1991 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
1992 wc->wc_len, (intmax_t)off));
1993 error = wapbl_circ_write(wl, wc, blocklen, &off);
1994 if (error)
1995 return error;
1996 }
1997 *offp = off;
1998 return 0;
1999 }
2000
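/*
 * Write the tracked inodes (inode number and mode pairs) as
 * WAPBL_WC_INODES records.  The first record carries wc_clear so that
 * replay discards inode lists from earlier transactions.
 */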
2001 static int
2002 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2003 {
2004 struct wapbl_wc_inodelist *wc =
2005 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2006 int i;
2007 int blocklen = 1<<wl->wl_log_dev_bshift;
2008 off_t off = *offp;
2009 int error;
2010
2011 struct wapbl_ino_head *wih;
2012 struct wapbl_ino *wi;
2013 int iph;
2014
2015 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2016 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2017
2018 i = 0;
2019 wih = &wl->wl_inohash[0];
2020 wi = 0;
2021 do {
2022 wc->wc_type = WAPBL_WC_INODES;
2023 wc->wc_len = blocklen;
2024 wc->wc_inocnt = 0;
2025 wc->wc_clear = (i == 0);
2026 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2027 while (!wi) {
2028 KASSERT((wih - &wl->wl_inohash[0])
2029 <= wl->wl_inohashmask);
2030 wi = LIST_FIRST(wih++);
2031 }
2032 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2033 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2034 wc->wc_inocnt++;
2035 i++;
2036 wi = LIST_NEXT(wi, wi_hash);
2037 }
2038 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2039 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2040 wc->wc_len, (intmax_t)off));
2041 error = wapbl_circ_write(wl, wc, blocklen, &off);
2042 if (error)
2043 return error;
2044 } while (i < wl->wl_inohashcnt);
2045
2046 *offp = off;
2047 return 0;
2048 }
2049
2050 #endif /* _KERNEL */
2051
2052 /****************************************************************/
2053
2054 #ifdef _KERNEL
2055 static struct pool wapbl_blk_pool;
2056 static int wapbl_blk_pool_refcount;
2057 #endif
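/*
 * Replay hash table entry: maps a filesystem (device) block number to
 * the offset of its most recent copy within the circular log.
 */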
2058 struct wapbl_blk {
2059 LIST_ENTRY(wapbl_blk) wb_hash;
2060 daddr_t wb_blk;
2061 off_t wb_off; /* Offset of this block in the log */
2062 };
2063 #define WAPBL_BLKPOOL_MIN 83
2064
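/*
 * Set up the replay block hash.  In the kernel the wapbl_blk entries
 * come from a shared pool that is created on first use and reference
 * counted across replays.
 */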
2065 static void
2066 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2067 {
2068 if (size < WAPBL_BLKPOOL_MIN)
2069 size = WAPBL_BLKPOOL_MIN;
2070 KASSERT(wr->wr_blkhash == 0);
2071 #ifdef _KERNEL
2072 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2073 if (atomic_inc_uint_nv(&wapbl_blk_pool_refcount) == 1) {
2074 pool_init(&wapbl_blk_pool, sizeof(struct wapbl_blk), 0, 0, 0,
2075 "wapblblkpl", &pool_allocator_nointr, IPL_NONE);
2076 }
2077 #else /* ! _KERNEL */
2078 /* Manually implement hashinit */
2079 {
2080 int i;
2081 unsigned long hashsize;
2082 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2083 continue;
2084 		wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
2085 		wr->wr_blkhashmask = hashsize - 1;
2086 		for (i = 0; i < hashsize; i++)
2087 			LIST_INIT(&wr->wr_blkhash[i]);
2088 }
2089 #endif /* ! _KERNEL */
2090 }
2091
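/* Tear down the replay block hash; it must already be empty. */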
2092 static void
2093 wapbl_blkhash_free(struct wapbl_replay *wr)
2094 {
2095 KASSERT(wr->wr_blkhashcnt == 0);
2096 #ifdef _KERNEL
2097 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2098 if (atomic_dec_uint_nv(&wapbl_blk_pool_refcount) == 0) {
2099 pool_destroy(&wapbl_blk_pool);
2100 }
2101 #else /* ! _KERNEL */
2102 wapbl_free(wr->wr_blkhash);
2103 #endif /* ! _KERNEL */
2104 }
2105
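/*
 * Return the hash entry for the given filesystem block, or NULL if the
 * block has no copy in the log.
 */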
2106 static struct wapbl_blk *
2107 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2108 {
2109 struct wapbl_blk_head *wbh;
2110 struct wapbl_blk *wb;
2111 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2112 LIST_FOREACH(wb, wbh, wb_hash) {
2113 if (blk == wb->wb_blk)
2114 return wb;
2115 }
2116 return 0;
2117 }
2118
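/*
 * Record the log offset of the most recent copy of a block, replacing
 * the offset in an existing entry if the block is already hashed.
 */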
2119 static void
2120 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2121 {
2122 struct wapbl_blk_head *wbh;
2123 struct wapbl_blk *wb;
2124 wb = wapbl_blkhash_get(wr, blk);
2125 if (wb) {
2126 KASSERT(wb->wb_blk == blk);
2127 wb->wb_off = off;
2128 } else {
2129 #ifdef _KERNEL
2130 wb = pool_get(&wapbl_blk_pool, PR_WAITOK);
2131 #else /* ! _KERNEL */
2132 wb = wapbl_malloc(sizeof(*wb));
2133 #endif /* ! _KERNEL */
2134 wb->wb_blk = blk;
2135 wb->wb_off = off;
2136 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2137 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2138 wr->wr_blkhashcnt++;
2139 }
2140 }
2141
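/* Drop a block from the replay hash, e.g. when a revocation record is seen. */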
2142 static void
2143 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2144 {
2145 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2146 if (wb) {
2147 KASSERT(wr->wr_blkhashcnt > 0);
2148 wr->wr_blkhashcnt--;
2149 LIST_REMOVE(wb, wb_hash);
2150 #ifdef _KERNEL
2151 pool_put(&wapbl_blk_pool, wb);
2152 #else /* ! _KERNEL */
2153 wapbl_free(wb);
2154 #endif /* ! _KERNEL */
2155 }
2156 }
2157
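/* Remove and free every entry in the replay block hash. */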
2158 static void
2159 wapbl_blkhash_clear(struct wapbl_replay *wr)
2160 {
2161 int i;
2162 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2163 struct wapbl_blk *wb;
2164
2165 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2166 KASSERT(wr->wr_blkhashcnt > 0);
2167 wr->wr_blkhashcnt--;
2168 LIST_REMOVE(wb, wb_hash);
2169 #ifdef _KERNEL
2170 pool_put(&wapbl_blk_pool, wb);
2171 #else /* ! _KERNEL */
2172 wapbl_free(wb);
2173 #endif /* ! _KERNEL */
2174 }
2175 }
2176 KASSERT(wr->wr_blkhashcnt == 0);
2177 }
2178
2179 /****************************************************************/
2180
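/*
 * Read len bytes at *offp from the circular log region, wrapping at
 * the end of the region, and advance *offp past the data read.
 */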
2181 static int
2182 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2183 {
2184 size_t slen;
2185 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2186 off_t off = *offp;
2187 int error;
2188
2189 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2190 wc->wc_log_dev_bshift) == len);
2191 if (off < wc->wc_circ_off)
2192 off = wc->wc_circ_off;
2193 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2194 if (slen < len) {
2195 error = wapbl_read(data, slen, wr->wr_devvp,
2196 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2197 if (error)
2198 return error;
2199 data = (uint8_t *)data + slen;
2200 len -= slen;
2201 off = wc->wc_circ_off;
2202 }
2203 error = wapbl_read(data, len, wr->wr_devvp,
2204 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2205 if (error)
2206 return error;
2207 off += len;
2208 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2209 off = wc->wc_circ_off;
2210 *offp = off;
2211 return 0;
2212 }
2213
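/* Advance *offp by len bytes through the circular log region, wrapping as needed. */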
2214 static void
2215 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2216 {
2217 size_t slen;
2218 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2219 off_t off = *offp;
2220
2221 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2222 wc->wc_log_dev_bshift) == len);
2223
2224 if (off < wc->wc_circ_off)
2225 off = wc->wc_circ_off;
2226 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2227 if (slen < len) {
2228 len -= slen;
2229 off = wc->wc_circ_off;
2230 }
2231 off += len;
2232 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2233 off = wc->wc_circ_off;
2234 *offp = off;
2235 }
2236
2237 /****************************************************************/
2238
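/*
 * Open a log for replay: read both commit headers, keep the one with
 * the newer generation, prescan the journal to build the replay block
 * hash, and collect the recorded inode list.
 */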
2239 int
2240 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2241 daddr_t off, size_t count, size_t blksize)
2242 {
2243 struct wapbl_replay *wr;
2244 int error;
2245 struct vnode *devvp;
2246 daddr_t logpbn;
2247 uint8_t *scratch;
2248 struct wapbl_wc_header *wch;
2249 struct wapbl_wc_header *wch2;
2250 /* Use this until we read the actual log header */
2251 int log_dev_bshift = DEV_BSHIFT;
2252 size_t used;
2253
2254 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2255 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2256 vp, off, count, blksize));
2257
2258 if (off < 0)
2259 return EINVAL;
2260
2261 if (blksize < DEV_BSIZE)
2262 return EINVAL;
2263 if (blksize % DEV_BSIZE)
2264 return EINVAL;
2265
2266 #ifdef _KERNEL
2267 #if 0
2268 /* XXX vp->v_size isn't reliably set for VBLK devices,
2269 * especially root. However, we might still want to verify
2270 * that the full load is readable */
2271 if ((off + count) * blksize > vp->v_size)
2272 return EINVAL;
2273 #endif
2274
2275 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2276 return error;
2277 }
2278 #else /* ! _KERNEL */
2279 devvp = vp;
2280 logpbn = off;
2281 #endif /* ! _KERNEL */
2282
2283 scratch = wapbl_malloc(MAXBSIZE);
2284
2285 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
2286 if (error)
2287 goto errout;
2288
2289 wch = (struct wapbl_wc_header *)scratch;
2290 wch2 =
2291 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2292 /* XXX verify checksums and magic numbers */
2293 if (wch->wc_type != WAPBL_WC_HEADER) {
2294 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2295 error = EFTYPE;
2296 goto errout;
2297 }
2298
2299 if (wch2->wc_generation > wch->wc_generation)
2300 wch = wch2;
2301
2302 wr = wapbl_calloc(1, sizeof(*wr));
2303
2304 wr->wr_logvp = vp;
2305 wr->wr_devvp = devvp;
2306 wr->wr_logpbn = logpbn;
2307
2308 wr->wr_scratch = scratch;
2309
2310 memcpy(&wr->wr_wc_header, wch, sizeof(wr->wr_wc_header));
2311
2312 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2313
2314 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2315 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2316 " len=%"PRId64" used=%zu\n",
2317 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2318 wch->wc_circ_size, used));
2319
2320 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2321 error = wapbl_replay_prescan(wr);
2322 if (error) {
2323 wapbl_replay_stop(wr);
2324 wapbl_replay_free(wr);
2325 return error;
2326 }
2327
2328 error = wapbl_replay_get_inodes(wr);
2329 if (error) {
2330 wapbl_replay_stop(wr);
2331 wapbl_replay_free(wr);
2332 return error;
2333 }
2334
2335 *wrp = wr;
2336 return 0;
2337
2338 errout:
2339 wapbl_free(scratch);
2340 return error;
2341 }
2342
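/* Release the scratch buffer and replay block hash obtained in wapbl_replay_start. */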
2343 void
2344 wapbl_replay_stop(struct wapbl_replay *wr)
2345 {
2346
2347 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2348
2349 KDASSERT(wapbl_replay_isopen(wr));
2350
2351 wapbl_free(wr->wr_scratch);
2352 wr->wr_scratch = 0;
2353
2354 wr->wr_logvp = 0;
2355
2356 wapbl_blkhash_clear(wr);
2357 wapbl_blkhash_free(wr);
2358 }
2359
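/* Free the replay state itself; the replay must already have been stopped. */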
2360 void
2361 wapbl_replay_free(struct wapbl_replay *wr)
2362 {
2363
2364 KDASSERT(!wapbl_replay_isopen(wr));
2365
2366 if (wr->wr_inodes)
2367 wapbl_free(wr->wr_inodes);
2368 wapbl_free(wr);
2369 }
2370
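/*
 * Function wrapper around the wapbl_replay_isopen() check, presumably
 * for callers that cannot use the macro form directly.
 */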
2371 int
2372 wapbl_replay_isopen1(struct wapbl_replay *wr)
2373 {
2374
2375 return wapbl_replay_isopen(wr);
2376 }
2377
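/*
 * Walk the journal from tail to head, entering logged blocks into the
 * replay hash, removing revoked blocks, and remembering where the most
 * recent inode list begins so it can be re-read later.
 */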
2378 static int
2379 wapbl_replay_prescan(struct wapbl_replay *wr)
2380 {
2381 off_t off;
2382 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2383 int error;
2384
2385 int logblklen = 1<<wch->wc_log_dev_bshift;
2386 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2387
2388 wapbl_blkhash_clear(wr);
2389
2390 off = wch->wc_tail;
2391 while (off != wch->wc_head) {
2392 struct wapbl_wc_null *wcn;
2393 off_t saveoff = off;
2394 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2395 if (error)
2396 goto errout;
2397 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2398 switch (wcn->wc_type) {
2399 case WAPBL_WC_BLOCKS:
2400 {
2401 struct wapbl_wc_blocklist *wc =
2402 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2403 int i;
2404 for (i = 0; i < wc->wc_blkcount; i++) {
2405 int j, n;
2406 /*
2407 * Enter each physical block into the
2408 * hashtable independently
2409 */
2410 n = wc->wc_blocks[i].wc_dlen >>
2411 wch->wc_fs_dev_bshift;
2412 for (j = 0; j < n; j++) {
2413 wapbl_blkhash_ins(wr,
2414 wc->wc_blocks[i].wc_daddr + j,
2415 off);
2416 wapbl_circ_advance(wr,
2417 fsblklen, &off);
2418 }
2419 }
2420 }
2421 break;
2422
2423 case WAPBL_WC_REVOCATIONS:
2424 {
2425 struct wapbl_wc_blocklist *wc =
2426 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2427 int i;
2428 for (i = 0; i < wc->wc_blkcount; i++) {
2429 int j, n;
2430 /*
2431 * Remove any blocks found from the
2432 * hashtable
2433 */
2434 n = wc->wc_blocks[i].wc_dlen >>
2435 wch->wc_fs_dev_bshift;
2436 for (j = 0; j < n; j++) {
2437 wapbl_blkhash_rem(wr,
2438 wc->wc_blocks[i].wc_daddr + j);
2439 }
2440 }
2441 }
2442 break;
2443
2444 case WAPBL_WC_INODES:
2445 {
2446 struct wapbl_wc_inodelist *wc =
2447 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2448 /*
2449 * Keep track of where we found this so we
2450 * can use it later
2451 */
2452 if (wc->wc_clear) {
2453 wr->wr_inodestail = saveoff;
2454 wr->wr_inodescnt = 0;
2455 }
2456 if (wr->wr_inodestail)
2457 wr->wr_inodeshead = off;
2458 wr->wr_inodescnt += wc->wc_inocnt;
2459 }
2460 break;
2461 default:
2462 printf("Unrecognized wapbl type: 0x%08x\n",
2463 wcn->wc_type);
2464 error = EFTYPE;
2465 goto errout;
2466 }
2467 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2468 if (off != saveoff) {
2469 printf("wapbl_replay: corrupted records\n");
2470 error = EFTYPE;
2471 goto errout;
2472 }
2473 }
2474 return 0;
2475
2476 errout:
2477 wapbl_blkhash_clear(wr);
2478 return error;
2479 }
2480
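/*
 * Re-read the inode records located by the prescan and copy them into
 * the wr_inodes array for use during replay.
 */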
2481 static int
2482 wapbl_replay_get_inodes(struct wapbl_replay *wr)
2483 {
2484 off_t off;
2485 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2486 int logblklen = 1<<wch->wc_log_dev_bshift;
2487 	int cnt = 0;
2488
2489 KDASSERT(wapbl_replay_isopen(wr));
2490
2491 if (wr->wr_inodescnt == 0)
2492 return 0;
2493
2494 KASSERT(!wr->wr_inodes);
2495
2496 wr->wr_inodes = wapbl_malloc(wr->wr_inodescnt*sizeof(wr->wr_inodes[0]));
2497
2498 off = wr->wr_inodestail;
2499
2500 while (off != wr->wr_inodeshead) {
2501 struct wapbl_wc_null *wcn;
2502 int error;
2503 off_t saveoff = off;
2504 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2505 if (error) {
2506 wapbl_free(wr->wr_inodes);
2507 wr->wr_inodes = 0;
2508 return error;
2509 }
2510 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2511 switch (wcn->wc_type) {
2512 case WAPBL_WC_BLOCKS:
2513 case WAPBL_WC_REVOCATIONS:
2514 break;
2515 case WAPBL_WC_INODES:
2516 {
2517 struct wapbl_wc_inodelist *wc =
2518 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2519 			/*
2520 			 * If this record clears the inode list, discard
2521 			 * anything gathered so far and start over.
2522 			 */
2523 if (wc->wc_clear) {
2524 cnt = 0;
2525 }
2526 /* This memcpy assumes that wr_inodes is
2527 * laid out the same as wc_inodes. */
2528 memcpy(&wr->wr_inodes[cnt], wc->wc_inodes,
2529 wc->wc_inocnt*sizeof(wc->wc_inodes[0]));
2530 cnt += wc->wc_inocnt;
2531 }
2532 break;
2533 default:
2534 KASSERT(0);
2535 }
2536 off = saveoff;
2537 wapbl_circ_advance(wr, wcn->wc_len, &off);
2538 }
2539 KASSERT(cnt == wr->wr_inodescnt);
2540 return 0;
2541 }
2542
2543 #ifdef DEBUG
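/*
 * Debug check: compare each logged block still referenced by the
 * replay hash against its current contents on the filesystem device
 * and report any mismatches.
 */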
2544 int
2545 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2546 {
2547 off_t off;
2548 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2549 int mismatchcnt = 0;
2550 int logblklen = 1<<wch->wc_log_dev_bshift;
2551 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2552 void *scratch1 = wapbl_malloc(MAXBSIZE);
2553 void *scratch2 = wapbl_malloc(MAXBSIZE);
2554 int error = 0;
2555
2556 KDASSERT(wapbl_replay_isopen(wr));
2557
2558 off = wch->wc_tail;
2559 while (off != wch->wc_head) {
2560 struct wapbl_wc_null *wcn;
2561 #ifdef DEBUG
2562 off_t saveoff = off;
2563 #endif
2564 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2565 if (error)
2566 goto out;
2567 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2568 switch (wcn->wc_type) {
2569 case WAPBL_WC_BLOCKS:
2570 {
2571 struct wapbl_wc_blocklist *wc =
2572 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2573 int i;
2574 for (i = 0; i < wc->wc_blkcount; i++) {
2575 int foundcnt = 0;
2576 int dirtycnt = 0;
2577 int j, n;
2578 				/*
2579 				 * Check each physical block against the
2580 				 * hashtable independently
2581 				 */
2582 n = wc->wc_blocks[i].wc_dlen >>
2583 wch->wc_fs_dev_bshift;
2584 for (j = 0; j < n; j++) {
2585 struct wapbl_blk *wb =
2586 wapbl_blkhash_get(wr,
2587 wc->wc_blocks[i].wc_daddr + j);
2588 if (wb && (wb->wb_off == off)) {
2589 foundcnt++;
2590 error =
2591 wapbl_circ_read(wr,
2592 scratch1, fsblklen,
2593 &off);
2594 if (error)
2595 goto out;
2596 error =
2597 wapbl_read(scratch2,
2598 fsblklen, fsdevvp,
2599 wb->wb_blk);
2600 if (error)
2601 goto out;
2602 if (memcmp(scratch1,
2603 scratch2,
2604 fsblklen)) {
2605 printf(
2606 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2607 wb->wb_blk, (intmax_t)off);
2608 dirtycnt++;
2609 mismatchcnt++;
2610 }
2611 } else {
2612 wapbl_circ_advance(wr,
2613 fsblklen, &off);
2614 }
2615 }
2616 #if 0
2617 /*
2618 * If all of the blocks in an entry
2619 * are clean, then remove all of its
2620 * blocks from the hashtable since they
2621 * never will need replay.
2622 */
2623 if ((foundcnt != 0) &&
2624 (dirtycnt == 0)) {
2625 off = saveoff;
2626 wapbl_circ_advance(wr,
2627 logblklen, &off);
2628 for (j = 0; j < n; j++) {
2629 struct wapbl_blk *wb =
2630 wapbl_blkhash_get(wr,
2631 wc->wc_blocks[i].wc_daddr + j);
2632 if (wb &&
2633 (wb->wb_off == off)) {
2634 wapbl_blkhash_rem(wr, wb->wb_blk);
2635 }
2636 wapbl_circ_advance(wr,
2637 fsblklen, &off);
2638 }
2639 }
2640 #endif
2641 }
2642 }
2643 break;
2644 case WAPBL_WC_REVOCATIONS:
2645 case WAPBL_WC_INODES:
2646 break;
2647 default:
2648 KASSERT(0);
2649 }
2650 #ifdef DEBUG
2651 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2652 KASSERT(off == saveoff);
2653 #endif
2654 }
2655 out:
2656 wapbl_free(scratch1);
2657 wapbl_free(scratch2);
2658 if (!error && mismatchcnt)
2659 error = EFTYPE;
2660 return error;
2661 }
2662 #endif
2663
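/*
 * Replay the journal: for each logged block whose hash entry points at
 * this copy, write the logged data to its home location on the
 * filesystem device.
 */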
2664 int
2665 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2666 {
2667 off_t off;
2668 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2669 int logblklen = 1<<wch->wc_log_dev_bshift;
2670 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2671 void *scratch1 = wapbl_malloc(MAXBSIZE);
2672 int error = 0;
2673
2674 KDASSERT(wapbl_replay_isopen(wr));
2675
2676 /*
2677 * This parses the journal for replay, although it could
2678 * just as easily walk the hashtable instead.
2679 */
2680
2681 off = wch->wc_tail;
2682 while (off != wch->wc_head) {
2683 struct wapbl_wc_null *wcn;
2684 #ifdef DEBUG
2685 off_t saveoff = off;
2686 #endif
2687 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2688 if (error)
2689 goto out;
2690 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2691 switch (wcn->wc_type) {
2692 case WAPBL_WC_BLOCKS:
2693 {
2694 struct wapbl_wc_blocklist *wc =
2695 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2696 int i;
2697 for (i = 0; i < wc->wc_blkcount; i++) {
2698 int j, n;
2699 /*
2700 * Check each physical block against
2701 * the hashtable independently
2702 */
2703 n = wc->wc_blocks[i].wc_dlen >>
2704 wch->wc_fs_dev_bshift;
2705 for (j = 0; j < n; j++) {
2706 struct wapbl_blk *wb =
2707 wapbl_blkhash_get(wr,
2708 wc->wc_blocks[i].wc_daddr + j);
2709 if (wb && (wb->wb_off == off)) {
2710 error = wapbl_circ_read(
2711 wr, scratch1,
2712 fsblklen, &off);
2713 if (error)
2714 goto out;
2715 error =
2716 wapbl_write(scratch1,
2717 fsblklen, fsdevvp,
2718 wb->wb_blk);
2719 if (error)
2720 goto out;
2721 } else {
2722 wapbl_circ_advance(wr,
2723 fsblklen, &off);
2724 }
2725 }
2726 }
2727 }
2728 break;
2729 case WAPBL_WC_REVOCATIONS:
2730 case WAPBL_WC_INODES:
2731 break;
2732 default:
2733 KASSERT(0);
2734 }
2735 #ifdef DEBUG
2736 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2737 KASSERT(off == saveoff);
2738 #endif
2739 }
2740 out:
2741 wapbl_free(scratch1);
2742 return error;
2743 }
2744
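/*
 * Overlay logged data onto a buffer: for each requested block present
 * in the replay hash, replace that portion of the caller's buffer with
 * the copy from the log.
 */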
2745 int
2746 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2747 {
2748 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2749 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2750
2751 KDASSERT(wapbl_replay_isopen(wr));
2752
2753 KASSERT((len % fsblklen) == 0);
2754
2755 while (len != 0) {
2756 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2757 if (wb) {
2758 off_t off = wb->wb_off;
2759 int error;
2760 error = wapbl_circ_read(wr, data, fsblklen, &off);
2761 if (error)
2762 return error;
2763 }
2764 data = (uint8_t *)data + fsblklen;
2765 len -= fsblklen;
2766 blk++;
2767 }
2768 return 0;
2769 }
2770