/*	$NetBSD: vfs_wapbl.c,v 1.1.2.8 2008/06/30 01:31:53 oster Exp $	*/

/*-
 * Copyright (c) 2003,2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements file-system-independent write-ahead logging (WAPBL).
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.1.2.8 2008/06/30 01:31:53 oster Exp $");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/wapbl.h>

#if WAPBL_UVM_ALLOC
#include <uvm/uvm.h>
#endif

#include <miscfs/specfs/specdev.h>

MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
#define	wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
#define	wapbl_free(a) free((a), M_WAPBL)
#define	wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO)

#else /* !_KERNEL */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <sys/time.h>
#include <sys/wapbl.h>

#define	KDASSERT(x) assert(x)
#define	KASSERT(x) assert(x)
#define	wapbl_malloc(s) malloc(s)
#define	wapbl_free(a) free(a)
#define	wapbl_calloc(n, s) calloc((n), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
struct wapbl {
	struct vnode *wl_logvp;	/* r: log here */
	struct vnode *wl_devvp;	/* r: log on this device */
	struct mount *wl_mount;	/* r: mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r: Physical block number of start of log */
	int wl_log_dev_bshift;	/* r: logarithm of device block size of log
				   device */
	int wl_fs_dev_bshift;	/* r: logarithm of device block size of
				   filesystem device */

	unsigned wl_lock_count;	/* a: Count of transactions in progress */

	size_t wl_circ_size;	/* r: Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r: Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r: Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r: Number of buf bytes reserved for log */

	off_t wl_head;		/* l: Byte offset of log head */
	off_t wl_tail;		/* l: Byte offset of log tail */
	/*
	 * head == tail == 0 means log is empty
	 * head == tail != 0 means log is full
	 * see assertions in wapbl_advance() for other boundary conditions.
	 * Only truncate moves the tail, except when flush sets it to
	 * wl_circ_off; only flush moves the head, except when truncate
	 * sets it to 0.
	 */
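	/*
	 * Illustrative example (not from the original source): with
	 * wl_circ_off = 1024 and an empty log (head == tail == 0),
	 * flushing a 2048-byte transaction leaves tail = 1024 and
	 * head = 3072; later truncates advance the tail and, once it
	 * catches the head, reset both to 0 (empty again).
	 */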

	struct wapbl_wc_header *wl_wc_header;	/* l */
	void *wl_wc_scratch;	/* l: scratch space (XXX: why?!?) */

	kmutex_t wl_mtx;	/* u: short-term lock */
	krwlock_t wl_rwlock;	/* u: File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
	wapbl_flush_fn_t wl_flush;	/* r */
	wapbl_flush_fn_t wl_flush_abort;/* r */

	size_t wl_bufbytes;	/* m: Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m: Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m: Total bcount of wl_bufs */

	LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes;	/* m: Amount of space available for
					   reclamation by truncate */
	int wl_error_count;	/* m: # of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes;	/* Byte count of unsynced buffers */
#endif

	daddr_t *wl_deallocblks;/* l: address of block */
	int *wl_dealloclens;	/* l: size of block (in fragments -- remember!) */
	int wl_dealloccnt;	/* l: total count */
	int wl_dealloclim;	/* l: max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
						   accounting */
};

#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
#endif /* _KERNEL */

static int wapbl_replay_prescan(struct wapbl_replay *wr);
static int wapbl_replay_get_inodes(struct wapbl_replay *wr);

static __inline size_t wapbl_space_free(size_t avail, off_t head,
	off_t tail);
static __inline size_t wapbl_space_used(size_t avail, off_t head,
	off_t tail);

#ifdef _KERNEL

#define	WAPBL_INODETRK_SIZE 83
static u_int wapbl_ino_pool_refcount;	/* unsigned: used with atomic_{inc,dec}_uint_nv */
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

/*
 * This is useful for debugging.  If set, the log will
 * only be truncated when necessary.
 */
int wapbl_lazy_truncate = 0;

struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};
void
wapbl_init(void)
{

	malloc_type_attach(M_WAPBL);
}

int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = DEV_BSHIFT;
	int fs_dev_bshift = DEV_BSHIFT;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
		    ("wapbl: log device's block size cannot be larger "
		     "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return ENOSPC;
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	LIST_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of log_dev_bshift */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;
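	/*
	 * Example layout (illustrative, not from the original source):
	 * with log_dev_bshift = 9, wl_circ_off = 1024, i.e. the first
	 * two 512-byte device blocks hold the alternating commit
	 * headers, and the circular buffer occupies the rest of the
	 * log, rounded down to whole 512-byte blocks.
	 */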

	/*
	 * wl_bufbytes_max limits the size of the in-memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since the filesystem will provide data in units of
	 *   1<<wl_fs_dev_bshift, it is convenient for it to be a multiple
	 *   of 1<<wl_fs_dev_bshift.
	 * Therefore it must be a multiple of the least common multiple of
	 * those three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple
	 * of a set of powers of two is simply the maximum of those numbers.
	 * So we can size wl_bufbytes_max by rounding it down to each
	 * quantity in turn, which leaves a multiple of the largest:
	 */
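	/*
	 * Worked example (illustrative, not from the original source):
	 * with PAGE_SHIFT = 12, wl_log_dev_bshift = 9 and
	 * wl_fs_dev_bshift = 11, the least common multiple is
	 * 1<<12 = 4096; the shift pairs below therefore round
	 * wl_bufbytes_max down to a multiple of 4096.
	 */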

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (nbuf / 2) * 1024;

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);

#if WAPBL_UVM_ALLOC
	wl->wl_deallocblks = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	KASSERT(wl->wl_deallocblks != NULL);
	wl->wl_dealloclens = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
	KASSERT(wl->wl_dealloclens != NULL);
#else
	wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
	    wl->wl_dealloclim);
	wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
	    wl->wl_dealloclim);
#endif

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1<<wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_malloc(len);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		int i;

		WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
		    ("wapbl_start: reusing log with %d inodes\n",
		    wr->wr_inodescnt));
		/*
		 * It's only valid to reuse the replay log if it's
		 * the same as the new log we just opened.
		 */
		KDASSERT(!wapbl_replay_isopen(wr));
		KASSERT(devvp->v_rdev == wr->wr_devvp->v_rdev);
		KASSERT(logpbn == wr->wr_logpbn);
		KASSERT(wl->wl_circ_size == wr->wr_wc_header.wc_circ_size);
		KASSERT(wl->wl_circ_off == wr->wr_wc_header.wc_circ_off);
		KASSERT(wl->wl_log_dev_bshift ==
		    wr->wr_wc_header.wc_log_dev_bshift);
		KASSERT(wl->wl_fs_dev_bshift ==
		    wr->wr_wc_header.wc_fs_dev_bshift);

		wl->wl_wc_header->wc_generation =
		    wr->wr_wc_header.wc_generation + 1;

		for (i = 0; i < wr->wr_inodescnt; i++)
			wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
			    wr->wr_inodes[i].wr_imode);

		/* Make sure new transaction won't overwrite old inodes list */
		KDASSERT(wapbl_transaction_len(wl) <=
		    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
		    wr->wr_inodestail));

		wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
		wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
		    wapbl_transaction_len(wl);

		error = wapbl_write_inodes(wl, &wl->wl_head);
		if (error)
			goto errout;

		KASSERT(wl->wl_head != wl->wl_tail);
		KASSERT(wl->wl_head != 0);
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) *
		wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) *
		wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);
	wapbl_free(wl);

	return error;
}

/*
 * Like wapbl_flush, only discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

#ifdef WAPBL_DEBUG_PRINT
	{
		struct wapbl_entry *we;
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}
	}

	/* Discard list of deallocs */
	wl->wl_dealloccnt = 0;
	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	struct vnode *vp;
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return EBUSY;
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	vp = wl->wl_logvp;

	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) *
		wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) *
		wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl);

	return 0;
}

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
	struct buf *bp;
	int error;

	KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
	KASSERT(devvp->v_type == VBLK);

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(&devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(&devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY; /* silly & dubious */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%x failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}
/*
 * off is a byte offset; the new offset for the next write is returned
 * via offp.  Handles wraparound of the circular log.
 */
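/*
 * Illustrative example (not from the original source): with
 * wl_circ_off = 1024 and wl_circ_size = 8192, a 1024-byte write at
 * off = 8704 is split: 512 bytes go out at 8704, the write wraps, the
 * remaining 512 bytes go out at 1024, and *offp is returned as 1536.
 */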
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;

	KDASSERT(((len >> wl->wl_log_dev_bshift) <<
	    wl->wl_log_dev_bshift) == len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		error = wapbl_write(data, slen, wl->wl_devvp,
		    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	error = wapbl_write(data, len, wl->wl_devvp,
	    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}

/****************************************************************/

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;
	krw_t op;

	KDASSERT(wl);

#ifdef WAPBL_DEBUG_SERIALIZE
	op = RW_WRITER;
#else
	op = RW_READER;
#endif

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
		   wl->wl_bufbytes_max / 2) ||
		  ((wl->wl_bufcount + (lockcount * 10)) >
		   wl->wl_bufcount_max / 2) ||
		  (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
	mutex_exit(&wl->wl_mtx);

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, op);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}

void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		LIST_REMOVE(bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked by dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	   ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	LIST_REMOVE(bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */

/****************************************************************/
/* Some utility inlines */

/* Advance the pointer at old to old+delta, wrapping within [off, off+size) */
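/*
 * Illustrative example (not from the original source): with size = 8192
 * and off = 1024, the valid region is [1024, 9216); advancing
 * old = 8704 by delta = 1024 yields (8704 + 1024) - 8192 = 1536.
 */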
static __inline off_t
wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
{
	off_t new;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= size);
	KASSERT((old == 0) || (old >= off));
	KASSERT(old < (size + off));

	if ((old == 0) && (delta != 0))
		new = off + delta;
	else if ((old + delta) < (size + off))
		new = old + delta;
	else
		new = (old + delta) - size;

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (new == old));
	KASSERT((delta == 0) || (new != 0));
	KASSERT((delta != (size)) || (new == old));

	/* Define acceptable ranges for output. */
	KASSERT((new == 0) || (new >= off));
	KASSERT(new < (size + off));
	return new;
}

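/*
 * Illustrative example (not from the original source): with avail = 8192,
 * head = 3072 and tail = 1024, wapbl_space_used() computes
 * ((3072 + 8191 - 1024) % 8192) + 1 = 2048 bytes in use; the
 * "+ (avail - 1) ... + 1" arrangement makes head == tail (nonzero)
 * count as a completely full log rather than an empty one.
 */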
static __inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail == 0) {
		KASSERT(head == 0);
		return 0;
	}
	return ((head + (avail - 1) - tail) % avail) + 1;
}

static __inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{

	return avail - wapbl_space_used(avail, head, tail);
}

static __inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_free(size, head, tail));
	head = wapbl_advance(size, off, head, delta);
	if ((tail == 0) && (head != 0))
		tail = off;
	*headp = head;
	*tailp = tail;
}

static __inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_used(size, head, tail));
	tail = wapbl_advance(size, off, tail, delta);
	if (head == tail) {
		head = tail = 0;
	}
	*headp = head;
	*tailp = tail;
}

#ifdef _KERNEL

/****************************************************************/

/*
 * Remove transactions whose buffers are completely flushed to disk.
 * Will block until at least minfree space is available.
 * Only intended to be called from inside wapbl_flush and therefore
 * does not protect against commit races with itself or with flush.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
		wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	if (waitonly)
		return 0;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush.  This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}

/****************************************************************/

void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;

	/*
	 * Handle possible flushing of buffers after log has been
	 * decommissioned.
	 */
	if (!wl) {
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
		we->we_unsynced_bufbytes -= bp->b_bufsize;
#endif

		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_flags & B_DONE);
	KDASSERT(!(bp->b_flags & B_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_flags & B_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_flags & B_INVAL));
	KDASSERT(!(bp->b_flags & B_NOCACHE));
#endif

	if (bp->b_error) {
#ifdef notyet /* Can't currently handle possible dirty buffer reuse */
		XXXpooka: interfaces not fully updated
		Note: this was not enabled in the original patch
		against netbsd4 either.  I don't know if comment
		above is true or not.

		/*
		 * If an error occurs, report the error and leave the
		 * buffer as a delayed write on the LRU queue.
		 * restarting the write would likely result in
		 * an error spinloop, so let it be done harmlessly
		 * by the syncer.
		 */
		bp->b_flags &= ~(B_DONE);
		simple_unlock(&bp->b_interlock);

		if (we->we_error == 0) {
			mutex_enter(&wl->wl_mtx);
			wl->wl_error_count++;
			mutex_exit(&wl->wl_mtx);
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		we->we_error = bp->b_error;
		bp->b_error = 0;
		brelse(bp);
		return;
#else
		/* For now, just mark the log permanently errored out */

		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
#endif
	}

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
	we->we_unsynced_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
	wl->wl_unsynced_bufbytes -= bp->b_bufsize;
#endif

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions.  If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		    (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			wapbl_free(we);
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
	brelse(bp, 0);
}

/*
 * Write transactions to disk + start I/O for contents
 */
int
wapbl_flush(struct wapbl *wl, int waitfor)
{
	struct buf *bp;
	struct wapbl_entry *we;
	off_t off;
	off_t head;
	off_t tail;
	size_t delta = 0;
	size_t flushsize;
	size_t reserved;
	int error = 0;

	/*
	 * Do a quick check to see if a full flush can be skipped
	 * This assumes that the flush callback does not need to be called
	 * unless there are other outstanding bufs.
	 */
	if (!waitfor) {
		size_t nbufs;
		mutex_enter(&wl->wl_mtx);	/* XXX need mutex here to
						   protect the KASSERTS */
		nbufs = wl->wl_bufcount;
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
		mutex_exit(&wl->wl_mtx);
		if (nbufs == 0)
			return 0;
	}

	/*
	 * XXX we may consider using LK_UPGRADE here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

	/*
	 * Now that we are fully locked and flushed,
	 * do another check for nothing to do.
	 */
	if (wl->wl_bufcount == 0) {
		goto out;
	}

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d flushing entries with "
	    "bufcount=%zu bufbytes=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes));
#endif

	/* Calculate amount of space needed to flush */
	flushsize = wapbl_transaction_len(wl);

	if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
		/*
		 * XXX this could be handled more gracefully, perhaps place
		 * only a partial transaction in the log and allow the
		 * remaining to flush without the protection of the journal.
		 */
		panic("wapbl_flush: current transaction too big to flush\n");
	}

	error = wapbl_truncate(wl, flushsize, 0);
	if (error)
		goto out2;

	off = wl->wl_head;
	KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
	        (off < wl->wl_circ_off + wl->wl_circ_size)));
	error = wapbl_write_blocks(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_revocations(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_inodes(wl, &off);
	if (error)
		goto out2;

	reserved = 0;
	if (wl->wl_inohashcnt)
		reserved = wapbl_transaction_inodes_len(wl);

	head = wl->wl_head;
	tail = wl->wl_tail;

	wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
	    &head, &tail);
#ifdef WAPBL_DEBUG
	if (head != off) {
		panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
		      " off=%"PRIdMAX" flush=%zu\n",
		      (intmax_t)head, (intmax_t)tail, (intmax_t)off,
		      flushsize);
	}
#else
	KASSERT(head == off);
#endif

	/* Opportunistically move the tail forward if we can */
	if (!wapbl_lazy_truncate) {
		mutex_enter(&wl->wl_mtx);
		delta = wl->wl_reclaimable_bytes;
		mutex_exit(&wl->wl_mtx);
		wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
		    &head, &tail);
	}

	error = wapbl_write_commit(wl, head, tail);
	if (error)
		goto out2;

	/* poolme? or kmemme? */
	we = wapbl_calloc(1, sizeof(*we));

#ifdef WAPBL_DEBUG_BUFBYTES
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
		 " unsynced=%zu"
		 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
		 "inodes=%d\n",
		 curproc->p_pid, curlwp->l_lid, flushsize, delta,
		 wapbl_space_used(wl->wl_circ_size, head, tail),
		 wl->wl_unsynced_bufbytes, wl->wl_bufcount,
		 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
		 wl->wl_inohashcnt));
#else
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
		 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
		 "inodes=%d\n",
		 curproc->p_pid, curlwp->l_lid, flushsize, delta,
		 wapbl_space_used(wl->wl_circ_size, head, tail),
		 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
		 wl->wl_dealloccnt, wl->wl_inohashcnt));
#endif


	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);

	wl->wl_reserved_bytes = reserved;
	wl->wl_head = head;
	wl->wl_tail = tail;
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	wl->wl_dealloccnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
	wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
#endif

	we->we_wapbl = wl;
	we->we_bufcount = wl->wl_bufcount;
#ifdef WAPBL_DEBUG_BUFBYTES
	we->we_unsynced_bufbytes = wl->wl_bufbytes;
#endif
	we->we_reclaimable_bytes = flushsize;
	we->we_error = 0;
	SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);

	/*
	 * This flushes bufs in the reverse order from which they were
	 * queued.  It shouldn't matter, but if we cared we could use a
	 * TAILQ instead.  XXX Note they will get put on the LRU queue
	 * when they flush, so we might actually want to change this to
	 * preserve order.
	 */
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
			continue;
		}
		bp->b_iodone = wapbl_biodone;
		bp->b_private = we;
		bremfree(bp);
		wapbl_remove_buf_locked(wl, bp);
		mutex_exit(&wl->wl_mtx);
		mutex_exit(&bufcache_lock);
		bawrite(bp);
		mutex_enter(&bufcache_lock);
		mutex_enter(&wl->wl_mtx);
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d done flushing entries...\n",
	    curproc->p_pid, curlwp->l_lid));
#endif

out:

	/*
	 * If the waitfor flag is set, don't return until everything is
	 * fully flushed and the on disk log is empty.
	 */
	if (waitfor) {
		error = wapbl_truncate(wl, wl->wl_circ_size -
			wl->wl_reserved_bytes, wapbl_lazy_truncate);
	}

out2:
	if (error) {
		wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
		    wl->wl_dealloclens, wl->wl_dealloccnt);
	}

#ifdef WAPBL_DEBUG_PRINT
	if (error) {
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
		mutex_enter(&wl->wl_mtx);
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n", we->we_bufcount,
			    we->we_reclaimable_bytes, we->we_error));
		}
#endif
		mutex_exit(&wl->wl_mtx);
	}
#endif

	rw_exit(&wl->wl_rwlock);
	return error;
}

/****************************************************************/

void
wapbl_jlock_assert(struct wapbl *wl)
{

#ifdef WAPBL_DEBUG_SERIALIZE
	KASSERT(rw_write_held(&wl->wl_rwlock));
#else
	KASSERT(rw_read_held(&wl->wl_rwlock) || rw_write_held(&wl->wl_rwlock));
#endif
}

void
wapbl_junlock_assert(struct wapbl *wl)
{

#ifdef WAPBL_DEBUG_SERIALIZE
	KASSERT(!rw_write_held(&wl->wl_rwlock));
#endif
}

/****************************************************************/

/* locks missing */
void
wapbl_print(struct wapbl *wl,
		int full,
		void (*pr)(const char *, ...))
{
	struct buf *bp;
	struct wapbl_entry *we;
	(*pr)("wapbl %p", wl);
	(*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
	      wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
	(*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
	      wl->wl_circ_size, wl->wl_circ_off,
	      (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
	(*pr)("log_dev_bshift = %d, fs_dev_bshift = %d\n",
	      wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
#ifdef WAPBL_DEBUG_BUFBYTES
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	      "reserved = %zu errcnt = %d unsynced = %zu\n",
	      wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
	      wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	      wl->wl_error_count, wl->wl_unsynced_bufbytes);
#else
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	      "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
	      wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	      wl->wl_error_count);
#endif
	(*pr)("\tdealloccnt = %d, dealloclim = %d\n",
	      wl->wl_dealloccnt, wl->wl_dealloclim);
	(*pr)("\tinohashcnt = %d, inohashmask = 0x%08lx\n",
	      wl->wl_inohashcnt, wl->wl_inohashmask);
	(*pr)("entries:\n");
	SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
#ifdef WAPBL_DEBUG_BUFBYTES
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
		      "unsynced = %zu\n",
		      we->we_bufcount, we->we_reclaimable_bytes,
		      we->we_error, we->we_unsynced_bufbytes);
#else
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
		      we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
#endif
	}
	if (full) {
		int cnt = 0;
		(*pr)("bufs =");
		LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
			if (!LIST_NEXT(bp, b_wapbllist)) {
				(*pr)(" %p", bp);
			} else if ((++cnt % 6) == 0) {
				(*pr)(" %p,\n\t", bp);
			} else {
				(*pr)(" %p,", bp);
			}
		}
		(*pr)("\n");

		(*pr)("dealloced blks = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i < wl->wl_dealloccnt; i++) {
				(*pr)(" %"PRId64":%d,",
				      wl->wl_deallocblks[i],
				      wl->wl_dealloclens[i]);
				if ((++cnt % 4) == 0) {
					(*pr)("\n\t");
				}
			}
		}
		(*pr)("\n");

		(*pr)("registered inodes = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i <= wl->wl_inohashmask; i++) {
				struct wapbl_ino_head *wih;
				struct wapbl_ino *wi;

				wih = &wl->wl_inohash[i];
				LIST_FOREACH(wi, wih, wi_hash) {
					if (wi->wi_ino == 0)
						continue;
					(*pr)(" %"PRId64"/0%06"PRIo32",",
					    wi->wi_ino, wi->wi_mode);
					if ((++cnt % 4) == 0) {
						(*pr)("\n\t");
					}
				}
			}
			(*pr)("\n");
		}
	}
}

#if defined(WAPBL_DEBUG) || defined(DDB)
void
wapbl_dump(struct wapbl *wl)
{
#if defined(WAPBL_DEBUG)
	if (!wl)
		wl = wapbl_debug_wl;
#endif
	if (!wl)
		return;
	wapbl_print(wl, 1, printf);
}
#endif

/****************************************************************/

void
wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
{

	wapbl_jlock_assert(wl);

	/* XXX should eventually instead tie this into resource estimation */
	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
	wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
	wl->wl_dealloclens[wl->wl_dealloccnt] = len;
	wl->wl_dealloccnt++;
	WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
	    ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
}

/****************************************************************/

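/*
 * Note (added comment): the wapbl_ino pool below is shared by every
 * wapbl instance; the atomic refcount creates it on the first
 * wapbl_inodetrk_init() and destroys it on the last wapbl_inodetrk_free().
 */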
static void
wapbl_inodetrk_init(struct wapbl *wl, u_int size)
{

	wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
	if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
		pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
		    "wapblinopl", &pool_allocator_nointr, IPL_NONE);
	}
}

static void
wapbl_inodetrk_free(struct wapbl *wl)
{

	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_inohashcnt == 0);
	hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
	if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
		pool_destroy(&wapbl_ino_pool);
	}
}

static struct wapbl_ino *
wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	KASSERT(mutex_owned(&wl->wl_mtx));

	wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
	LIST_FOREACH(wi, wih, wi_hash) {
		if (ino == wi->wi_ino)
			return wi;
	}
	return NULL;
}

void
wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	wi = pool_get(&wapbl_ino_pool, PR_WAITOK);

	mutex_enter(&wl->wl_mtx);
	if (wapbl_inodetrk_get(wl, ino) == NULL) {
		wi->wi_ino = ino;
		wi->wi_mode = mode;
		wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
		LIST_INSERT_HEAD(wih, wi, wi_hash);
		wl->wl_inohashcnt++;
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_register_inode: ino=%"PRId64"\n", ino));
		mutex_exit(&wl->wl_mtx);
	} else {
		mutex_exit(&wl->wl_mtx);
		pool_put(&wapbl_ino_pool, wi);
	}
}

void
wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino *wi;

	mutex_enter(&wl->wl_mtx);
	wi = wapbl_inodetrk_get(wl, ino);
	if (wi) {
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
		KASSERT(wl->wl_inohashcnt > 0);
		wl->wl_inohashcnt--;
		LIST_REMOVE(wi, wi_hash);
		mutex_exit(&wl->wl_mtx);

		pool_put(&wapbl_ino_pool, wi);
	} else {
		mutex_exit(&wl->wl_mtx);
	}
}

/****************************************************************/

static __inline size_t
wapbl_transaction_inodes_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int iph;

	/* Calculate number of inodes described in an inodelist header */
	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);

	KASSERT(iph > 0);

	return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
}


/* Calculate amount of space a transaction will take on disk */
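/*
 * Illustrative example (not from the original source; exact bph/iph
 * values depend on the wapbl_wc_* header layouts): with 512-byte log
 * blocks, 10 buffers totalling 40960 bytes of bcount and no
 * deallocations or registered inodes, this comes to 40960 bytes of
 * data plus one 512-byte blocklist header plus one 512-byte inodelist
 * block (the inode accounting always reserves at least one block),
 * i.e. 41984 bytes.
 */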
static size_t
wapbl_transaction_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	size_t len;
	int bph;

	/* Calculate number of blocks described in a blocklist header */
	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	KASSERT(bph > 0);

	len = wl->wl_bcount;
	len += howmany(wl->wl_bufcount, bph)*blocklen;
	len += howmany(wl->wl_dealloccnt, bph)*blocklen;
	len += wapbl_transaction_inodes_len(wl);

	return len;
}

/*
 * Perform the commit operation
 *
 * Note that the generation number increment needs to be protected
 * against racing with other invocations of wapbl_write_commit.  This
 * is OK since this routine is only invoked from wapbl_flush.
 */
static int
wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
{
	struct wapbl_wc_header *wc = wl->wl_wc_header;
	struct timespec ts;
	int error;
	int force = 1;

	/* XXX Calc checksum here, instead we do this for now */
	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	wc->wc_head = head;
	wc->wc_tail = tail;
	wc->wc_checksum = 0;
	wc->wc_version = 1;
	getnanotime(&ts);	/* XXX need higher resolution time here? */
	wc->wc_time = ts.tv_sec;
	wc->wc_timensec = ts.tv_nsec;

	WAPBL_PRINTF(WAPBL_PRINT_WRITE,
	    ("wapbl_write_commit: head = %"PRIdMAX" tail = %"PRIdMAX"\n",
	    (intmax_t)head, (intmax_t)tail));

	/*
	 * XXX if generation will rollover, then first zero
	 * over second commit header before trying to write both headers.
	 */

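	/*
	 * The two commit headers occupy the first two log device blocks
	 * (see the wl_circ_off reservation in wapbl_start) and alternate
	 * by generation parity, so a crash during this write leaves the
	 * other, older header intact for replay.
	 */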
	error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
	    wl->wl_logpbn + wc->wc_generation % 2);
	if (error)
		return error;

	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	/*
	 * If the generation number was zero, write it out a second time.
	 * This handles initialization and generation number rollover
	 */
	if (wc->wc_generation++ == 0) {
		error = wapbl_write_commit(wl, head, tail);
		/*
		 * This panic should be able to be removed if we do the
		 * zero'ing mentioned above, and we are certain to roll
		 * back generation number on failure.
		 */
		if (error)
			panic("wapbl_write_commit: error writing duplicate "
			      "log header: %d\n", error);
	}
	return 0;
}

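/*
 * On-disk record layout sketch (descriptive comment added here, not in
 * the original): each WAPBL_WC_BLOCKS record is a single blocklist
 * header block naming up to bph buffers, followed immediately by those
 * buffers' data; wc_len counts header plus data so replay can step
 * over the whole record.
 */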
/* Write the blocklist records; the new log offset is returned via offp */
1891 static int
1892 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
1893 {
1894 struct wapbl_wc_blocklist *wc =
1895 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1896 int blocklen = 1<<wl->wl_log_dev_bshift;
1897 int bph;
1898 struct buf *bp;
1899 off_t off = *offp;
1900 int error;
1901
1902 KASSERT(rw_write_held(&wl->wl_rwlock));
1903
1904 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1905 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1906
1907 bp = LIST_FIRST(&wl->wl_bufs);
1908
1909 while (bp) {
1910 int cnt;
1911 struct buf *obp = bp;
1912
1913 KASSERT(bp->b_flags & B_LOCKED);
1914
1915 wc->wc_type = WAPBL_WC_BLOCKS;
1916 wc->wc_len = blocklen;
1917 wc->wc_blkcount = 0;
1918 while (bp && (wc->wc_blkcount < bph)) {
1919 /*
1920 * Make sure all the physical block numbers are up to
1921 			 * date.  If that is not always true on a given
1922 			 * filesystem, then VOP_BMAP must be called.  We
1923 			 * could call VOP_BMAP here, or else in the
1924 			 * filesystem-specific flush callback, although
1925 			 * neither of those solutions allows us to take
1926 			 * the vnode lock.  If a filesystem requires the
1927 			 * vnode lock to call VOP_BMAP, we can probably
1928 			 * do it in bwrite, where the invoking code
1929 			 * should already hold it.
1930 */
1931 KASSERT((bp->b_vp->v_type == VBLK) ||
1932 (bp->b_blkno != bp->b_lblkno));
1933 KASSERT(bp->b_blkno > 0);
1934
1935 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
1936 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
1937 wc->wc_len += bp->b_bcount;
1938 wc->wc_blkcount++;
1939 bp = LIST_NEXT(bp, b_wapbllist);
1940 }
1941 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1942 ("wapbl_write_blocks: len = %u off = %"PRIdMAX"\n",
1943 wc->wc_len, (intmax_t)off));
1944
1945 error = wapbl_circ_write(wl, wc, blocklen, &off);
1946 if (error)
1947 return error;
1948 bp = obp;
1949 cnt = 0;
1950 while (bp && (cnt++ < bph)) {
1951 error = wapbl_circ_write(wl, bp->b_data,
1952 bp->b_bcount, &off);
1953 if (error)
1954 return error;
1955 bp = LIST_NEXT(bp, b_wapbllist);
1956 }
1957 }
1958 *offp = off;
1959 return 0;
1960 }
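
/*
 * Example: the two-pass pattern of wapbl_write_blocks() reduced to
 * index arithmetic.  Pass one fills a descriptor naming up to bph
 * buffers, pass two emits those buffers' payloads, and the pair
 * repeats until the input is exhausted.  A hypothetical, disabled
 * sketch (the ex_* name is not part of this file).
 */
#if 0
static void
ex_write_chunks(int n, int bph)
{
	int i = 0, j, start, cnt;

	while (i < n) {
		start = i;
		cnt = 0;
		while (i < n && cnt < bph) {
			/* pass 1: describe buffer i in the descriptor */
			cnt++;
			i++;
		}
		/* ... write the descriptor block to the log here ... */
		for (j = start; j < start + cnt; j++)
			/* pass 2: write buffer j's payload to the log */
			continue;
	}
}
#endif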
1961
1962 static int
1963 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
1964 {
1965 struct wapbl_wc_blocklist *wc =
1966 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1967 int i;
1968 int blocklen = 1<<wl->wl_log_dev_bshift;
1969 int bph;
1970 off_t off = *offp;
1971 int error;
1972
1973 if (wl->wl_dealloccnt == 0)
1974 return 0;
1975
1976 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1977 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1978
1979 i = 0;
1980 while (i < wl->wl_dealloccnt) {
1981 wc->wc_type = WAPBL_WC_REVOCATIONS;
1982 wc->wc_len = blocklen;
1983 wc->wc_blkcount = 0;
1984 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
1985 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
1986 wl->wl_deallocblks[i];
1987 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
1988 wl->wl_dealloclens[i];
1989 wc->wc_blkcount++;
1990 i++;
1991 }
1992 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1993 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
1994 wc->wc_len, (intmax_t)off));
1995 error = wapbl_circ_write(wl, wc, blocklen, &off);
1996 if (error)
1997 return error;
1998 }
1999 *offp = off;
2000 return 0;
2001 }
2002
2003 static int
2004 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2005 {
2006 struct wapbl_wc_inodelist *wc =
2007 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2008 int i;
2009 int blocklen = 1<<wl->wl_log_dev_bshift;
2010 off_t off = *offp;
2011 int error;
2012
2013 struct wapbl_ino_head *wih;
2014 struct wapbl_ino *wi;
2015 int iph;
2016
2017 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2018 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2019
2020 i = 0;
2021 wih = &wl->wl_inohash[0];
2022 wi = 0;
2023 do {
2024 wc->wc_type = WAPBL_WC_INODES;
2025 wc->wc_len = blocklen;
2026 wc->wc_inocnt = 0;
2027 wc->wc_clear = (i == 0);
2028 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2029 while (!wi) {
2030 KASSERT((wih - &wl->wl_inohash[0])
2031 <= wl->wl_inohashmask);
2032 wi = LIST_FIRST(wih++);
2033 }
2034 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2035 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2036 wc->wc_inocnt++;
2037 i++;
2038 wi = LIST_NEXT(wi, wi_hash);
2039 }
2040 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2041 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2042 wc->wc_len, (intmax_t)off));
2043 error = wapbl_circ_write(wl, wc, blocklen, &off);
2044 if (error)
2045 return error;
2046 } while (i < wl->wl_inohashcnt);
2047
2048 *offp = off;
2049 return 0;
2050 }
2051
2052 #endif /* _KERNEL */
2053
2054 /****************************************************************/
2055
2056 #ifdef _KERNEL
2057 static struct pool wapbl_blk_pool;
2058 static int wapbl_blk_pool_refcount;
2059 #endif
2060 struct wapbl_blk {
2061 LIST_ENTRY(wapbl_blk) wb_hash;
2062 daddr_t wb_blk;
2063 off_t wb_off; /* Offset of this block in the log */
2064 };
2065 #define WAPBL_BLKPOOL_MIN 83
2066
2067 static void
2068 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2069 {
2070 if (size < WAPBL_BLKPOOL_MIN)
2071 size = WAPBL_BLKPOOL_MIN;
2072 KASSERT(wr->wr_blkhash == 0);
2073 #ifdef _KERNEL
2074 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2075 if (atomic_inc_uint_nv(&wapbl_blk_pool_refcount) == 1) {
2076 pool_init(&wapbl_blk_pool, sizeof(struct wapbl_blk), 0, 0, 0,
2077 "wapblblkpl", &pool_allocator_nointr, IPL_NONE);
2078 }
2079 #else /* ! _KERNEL */
2080 /* Manually implement hashinit */
2081 {
2082 int i;
2083 unsigned long hashsize;
2084 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2085 continue;
2086 wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
2087 		for (i = 0; i < hashsize; i++)
2088 LIST_INIT(&wr->wr_blkhash[i]);
2089 wr->wr_blkhashmask = hashsize - 1;
2090 }
2091 #endif /* ! _KERNEL */
2092 }
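
/*
 * Example: the userland hashinit emulation above sizes the table to a
 * power of two so that "blk & wr_blkhashmask" can stand in for a
 * modulo.  A standalone, disabled sketch (the ex_* name is
 * hypothetical):
 */
#if 0
static unsigned long
ex_roundup_pow2(unsigned long size)
{
	unsigned long hashsize;

	for (hashsize = 1; hashsize < size; hashsize <<= 1)
		continue;
	return hashsize;	/* e.g. 83 (WAPBL_BLKPOOL_MIN) -> 128 */
}
#endif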
2093
2094 static void
2095 wapbl_blkhash_free(struct wapbl_replay *wr)
2096 {
2097 KASSERT(wr->wr_blkhashcnt == 0);
2098 #ifdef _KERNEL
2099 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2100 if (atomic_dec_uint_nv(&wapbl_blk_pool_refcount) == 0) {
2101 pool_destroy(&wapbl_blk_pool);
2102 }
2103 #else /* ! _KERNEL */
2104 wapbl_free(wr->wr_blkhash);
2105 #endif /* ! _KERNEL */
2106 }
2107
2108 static struct wapbl_blk *
2109 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2110 {
2111 struct wapbl_blk_head *wbh;
2112 struct wapbl_blk *wb;
2113 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2114 LIST_FOREACH(wb, wbh, wb_hash) {
2115 if (blk == wb->wb_blk)
2116 return wb;
2117 }
2118 return 0;
2119 }
2120
2121 static void
2122 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2123 {
2124 struct wapbl_blk_head *wbh;
2125 struct wapbl_blk *wb;
2126 wb = wapbl_blkhash_get(wr, blk);
2127 if (wb) {
2128 KASSERT(wb->wb_blk == blk);
2129 wb->wb_off = off;
2130 } else {
2131 #ifdef _KERNEL
2132 wb = pool_get(&wapbl_blk_pool, PR_WAITOK);
2133 #else /* ! _KERNEL */
2134 wb = wapbl_malloc(sizeof(*wb));
2135 #endif /* ! _KERNEL */
2136 wb->wb_blk = blk;
2137 wb->wb_off = off;
2138 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2139 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2140 wr->wr_blkhashcnt++;
2141 }
2142 }
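
/*
 * Example: wapbl_blkhash_ins() is last-write-wins; re-inserting an
 * existing block number only updates its saved log offset, so after a
 * full prescan each block maps to its newest copy in the log.  A
 * hypothetical, disabled illustration (the ex_* name is not part of
 * this file and is never called):
 */
#if 0
static void
ex_blkhash_lww(struct wapbl_replay *wr, daddr_t blk,
    off_t off_old, off_t off_new)
{
	wapbl_blkhash_ins(wr, blk, off_old);
	wapbl_blkhash_ins(wr, blk, off_new);	/* same entry, new offset */
	KASSERT(wapbl_blkhash_get(wr, blk)->wb_off == off_new);
}
#endif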
2143
2144 static void
2145 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2146 {
2147 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2148 if (wb) {
2149 KASSERT(wr->wr_blkhashcnt > 0);
2150 wr->wr_blkhashcnt--;
2151 LIST_REMOVE(wb, wb_hash);
2152 #ifdef _KERNEL
2153 pool_put(&wapbl_blk_pool, wb);
2154 #else /* ! _KERNEL */
2155 wapbl_free(wb);
2156 #endif /* ! _KERNEL */
2157 }
2158 }
2159
2160 static void
2161 wapbl_blkhash_clear(struct wapbl_replay *wr)
2162 {
2163 int i;
2164 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2165 struct wapbl_blk *wb;
2166
2167 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2168 KASSERT(wr->wr_blkhashcnt > 0);
2169 wr->wr_blkhashcnt--;
2170 LIST_REMOVE(wb, wb_hash);
2171 #ifdef _KERNEL
2172 pool_put(&wapbl_blk_pool, wb);
2173 #else /* ! _KERNEL */
2174 wapbl_free(wb);
2175 #endif /* ! _KERNEL */
2176 }
2177 }
2178 KASSERT(wr->wr_blkhashcnt == 0);
2179 }
2180
2181 /****************************************************************/
2182
2183 static int
2184 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2185 {
2186 size_t slen;
2187 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2188 off_t off = *offp;
2189 int error;
2190
2191 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2192 wc->wc_log_dev_bshift) == len);
2193 if (off < wc->wc_circ_off)
2194 off = wc->wc_circ_off;
2195 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2196 if (slen < len) {
2197 error = wapbl_read(data, slen, wr->wr_devvp,
2198 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2199 if (error)
2200 return error;
2201 data = (uint8_t *)data + slen;
2202 len -= slen;
2203 off = wc->wc_circ_off;
2204 }
2205 error = wapbl_read(data, len, wr->wr_devvp,
2206 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2207 if (error)
2208 return error;
2209 off += len;
2210 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2211 off = wc->wc_circ_off;
2212 *offp = off;
2213 return 0;
2214 }
2215
2216 static void
2217 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2218 {
2219 size_t slen;
2220 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2221 off_t off = *offp;
2222
2223 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2224 wc->wc_log_dev_bshift) == len);
2225
2226 if (off < wc->wc_circ_off)
2227 off = wc->wc_circ_off;
2228 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2229 if (slen < len) {
2230 len -= slen;
2231 off = wc->wc_circ_off;
2232 }
2233 off += len;
2234 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2235 off = wc->wc_circ_off;
2236 *offp = off;
2237 }
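
/*
 * Example: the circular-log offset arithmetic above in isolation.
 * Valid offsets live in [wc_circ_off, wc_circ_off + wc_circ_size);
 * consuming len bytes past the end wraps back to the start of the
 * circular area.  A hypothetical, disabled sketch (the ex_* name is
 * not part of this file):
 */
#if 0
static off_t
ex_circ_advance(off_t off, size_t len, off_t circ_off, off_t circ_size)
{
	off_t slen;

	if (off < circ_off)
		off = circ_off;
	slen = circ_off + circ_size - off;	/* room before the end */
	if (slen < (off_t)len) {
		len -= slen;
		off = circ_off;			/* wrap */
	}
	off += len;
	if (off >= circ_off + circ_size)
		off = circ_off;
	return off;
}
#endif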
2238
2239 /****************************************************************/
2240
2241 int
2242 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2243 daddr_t off, size_t count, size_t blksize)
2244 {
2245 struct wapbl_replay *wr;
2246 int error;
2247 struct vnode *devvp;
2248 daddr_t logpbn;
2249 uint8_t *scratch;
2250 struct wapbl_wc_header *wch;
2251 struct wapbl_wc_header *wch2;
2252 /* Use this until we read the actual log header */
2253 int log_dev_bshift = DEV_BSHIFT;
2254 size_t used;
2255
2256 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2257 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2258 vp, off, count, blksize));
2259
2260 if (off < 0)
2261 return EINVAL;
2262
2263 if (blksize < DEV_BSIZE)
2264 return EINVAL;
2265 if (blksize % DEV_BSIZE)
2266 return EINVAL;
2267
2268 #ifdef _KERNEL
2269 #if 0
2270 /* XXX vp->v_size isn't reliably set for VBLK devices,
2271 * especially root. However, we might still want to verify
2272 * that the full load is readable */
2273 if ((off + count) * blksize > vp->v_size)
2274 return EINVAL;
2275 #endif
2276
2277 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2278 return error;
2279 }
2280 #else /* ! _KERNEL */
2281 devvp = vp;
2282 logpbn = off;
2283 #endif /* ! _KERNEL */
2284
2285 scratch = wapbl_malloc(MAXBSIZE);
2286
2287 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
2288 if (error)
2289 goto errout;
2290
2291 wch = (struct wapbl_wc_header *)scratch;
2292 wch2 =
2293 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2294 /* XXX verify checksums and magic numbers */
2295 if (wch->wc_type != WAPBL_WC_HEADER) {
2296 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2297 error = EFTYPE;
2298 goto errout;
2299 }
2300
2301 if (wch2->wc_generation > wch->wc_generation)
2302 wch = wch2;
2303
2304 wr = wapbl_calloc(1, sizeof(*wr));
2305
2306 wr->wr_logvp = vp;
2307 wr->wr_devvp = devvp;
2308 wr->wr_logpbn = logpbn;
2309
2310 wr->wr_scratch = scratch;
2311
2312 memcpy(&wr->wr_wc_header, wch, sizeof(wr->wr_wc_header));
2313
2314 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2315
2316 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2317 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2318 " len=%"PRId64" used=%zu\n",
2319 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2320 wch->wc_circ_size, used));
2321
2322 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2323 error = wapbl_replay_prescan(wr);
2324 if (error) {
2325 wapbl_replay_stop(wr);
2326 wapbl_replay_free(wr);
2327 return error;
2328 }
2329
2330 error = wapbl_replay_get_inodes(wr);
2331 if (error) {
2332 wapbl_replay_stop(wr);
2333 wapbl_replay_free(wr);
2334 return error;
2335 }
2336
2337 *wrp = wr;
2338 return 0;
2339
2340 errout:
2341 wapbl_free(scratch);
2342 return error;
2343 }
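
/*
 * Example: the reader side of the two-header scheme.  Both commit
 * headers are fetched in a single two-block read; the one with the
 * larger generation number describes the most recent complete commit.
 * A hypothetical, disabled sketch of the selection done above (the
 * ex_* name is not part of this file):
 */
#if 0
static struct wapbl_wc_header *
ex_pick_header(uint8_t *scratch, int log_dev_bshift)
{
	struct wapbl_wc_header *wch = (struct wapbl_wc_header *)scratch;
	struct wapbl_wc_header *wch2 =
	    (struct wapbl_wc_header *)(scratch + (1 << log_dev_bshift));

	return (wch2->wc_generation > wch->wc_generation) ? wch2 : wch;
}
#endif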
2344
2345 void
2346 wapbl_replay_stop(struct wapbl_replay *wr)
2347 {
2348
2349 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2350
2351 KDASSERT(wapbl_replay_isopen(wr));
2352
2353 wapbl_free(wr->wr_scratch);
2354 wr->wr_scratch = 0;
2355
2356 wr->wr_logvp = 0;
2357
2358 wapbl_blkhash_clear(wr);
2359 wapbl_blkhash_free(wr);
2360 }
2361
2362 void
2363 wapbl_replay_free(struct wapbl_replay *wr)
2364 {
2365
2366 KDASSERT(!wapbl_replay_isopen(wr));
2367
2368 if (wr->wr_inodes)
2369 wapbl_free(wr->wr_inodes);
2370 wapbl_free(wr);
2371 }
2372
2373 int
2374 wapbl_replay_isopen1(struct wapbl_replay *wr)
2375 {
2376
2377 return wapbl_replay_isopen(wr);
2378 }
2379
2380 static int
2381 wapbl_replay_prescan(struct wapbl_replay *wr)
2382 {
2383 off_t off;
2384 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2385 int error;
2386
2387 int logblklen = 1<<wch->wc_log_dev_bshift;
2388 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2389
2390 wapbl_blkhash_clear(wr);
2391
2392 off = wch->wc_tail;
2393 while (off != wch->wc_head) {
2394 struct wapbl_wc_null *wcn;
2395 off_t saveoff = off;
2396 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2397 if (error)
2398 goto errout;
2399 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2400 switch (wcn->wc_type) {
2401 case WAPBL_WC_BLOCKS:
2402 {
2403 struct wapbl_wc_blocklist *wc =
2404 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2405 int i;
2406 for (i = 0; i < wc->wc_blkcount; i++) {
2407 int j, n;
2408 /*
2409 * Enter each physical block into the
2410 * hashtable independently
2411 */
2412 n = wc->wc_blocks[i].wc_dlen >>
2413 wch->wc_fs_dev_bshift;
2414 for (j = 0; j < n; j++) {
2415 wapbl_blkhash_ins(wr,
2416 wc->wc_blocks[i].wc_daddr + j,
2417 off);
2418 wapbl_circ_advance(wr,
2419 fsblklen, &off);
2420 }
2421 }
2422 }
2423 break;
2424
2425 case WAPBL_WC_REVOCATIONS:
2426 {
2427 struct wapbl_wc_blocklist *wc =
2428 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2429 int i;
2430 for (i = 0; i < wc->wc_blkcount; i++) {
2431 int j, n;
2432 /*
2433 * Remove any blocks found from the
2434 * hashtable
2435 */
2436 n = wc->wc_blocks[i].wc_dlen >>
2437 wch->wc_fs_dev_bshift;
2438 for (j = 0; j < n; j++) {
2439 wapbl_blkhash_rem(wr,
2440 wc->wc_blocks[i].wc_daddr + j);
2441 }
2442 }
2443 }
2444 break;
2445
2446 case WAPBL_WC_INODES:
2447 {
2448 struct wapbl_wc_inodelist *wc =
2449 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2450 /*
2451 * Keep track of where we found this so we
2452 * can use it later
2453 */
2454 if (wc->wc_clear) {
2455 wr->wr_inodestail = saveoff;
2456 wr->wr_inodescnt = 0;
2457 }
2458 if (wr->wr_inodestail)
2459 wr->wr_inodeshead = off;
2460 wr->wr_inodescnt += wc->wc_inocnt;
2461 }
2462 break;
2463 default:
2464 printf("Unrecognized wapbl type: 0x%08x\n",
2465 wcn->wc_type);
2466 error = EFTYPE;
2467 goto errout;
2468 }
2469 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2470 if (off != saveoff) {
2471 printf("wapbl_replay: corrupted records\n");
2472 error = EFTYPE;
2473 goto errout;
2474 }
2475 }
2476 return 0;
2477
2478 errout:
2479 wapbl_blkhash_clear(wr);
2480 return error;
2481 }
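
/*
 * Example: the consistency check at the bottom of the prescan loop.
 * A record's wc_len covers its descriptor block plus any payload that
 * follows it, so advancing the record's start offset by wc_len must
 * land exactly where the payload walk ended.  A hypothetical, disabled
 * helper (the ex_* name is not part of this file):
 */
#if 0
static int
ex_record_len_consistent(struct wapbl_replay *wr, off_t recstart,
    off_t walked_to, size_t wc_len)
{
	wapbl_circ_advance(wr, wc_len, &recstart);
	return recstart == walked_to;	/* mismatch => corrupt log */
}
#endif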
2482
2483 static int
2484 wapbl_replay_get_inodes(struct wapbl_replay *wr)
2485 {
2486 off_t off;
2487 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2488 int logblklen = 1<<wch->wc_log_dev_bshift;
2489 	int cnt = 0;
2490
2491 KDASSERT(wapbl_replay_isopen(wr));
2492
2493 if (wr->wr_inodescnt == 0)
2494 return 0;
2495
2496 KASSERT(!wr->wr_inodes);
2497
2498 wr->wr_inodes = wapbl_malloc(wr->wr_inodescnt*sizeof(wr->wr_inodes[0]));
2499
2500 off = wr->wr_inodestail;
2501
2502 while (off != wr->wr_inodeshead) {
2503 struct wapbl_wc_null *wcn;
2504 int error;
2505 off_t saveoff = off;
2506 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2507 if (error) {
2508 wapbl_free(wr->wr_inodes);
2509 wr->wr_inodes = 0;
2510 return error;
2511 }
2512 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2513 switch (wcn->wc_type) {
2514 case WAPBL_WC_BLOCKS:
2515 case WAPBL_WC_REVOCATIONS:
2516 break;
2517 case WAPBL_WC_INODES:
2518 {
2519 struct wapbl_wc_inodelist *wc =
2520 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2521 /*
2522 			 * A clear entry invalidates all previously
2523 			 * gathered inodes, so restart the count
2524 */
2525 if (wc->wc_clear) {
2526 cnt = 0;
2527 }
2528 /* This memcpy assumes that wr_inodes is
2529 * laid out the same as wc_inodes. */
2530 memcpy(&wr->wr_inodes[cnt], wc->wc_inodes,
2531 wc->wc_inocnt*sizeof(wc->wc_inodes[0]));
2532 cnt += wc->wc_inocnt;
2533 }
2534 break;
2535 default:
2536 KASSERT(0);
2537 }
2538 off = saveoff;
2539 wapbl_circ_advance(wr, wcn->wc_len, &off);
2540 }
2541 KASSERT(cnt == wr->wr_inodescnt);
2542 return 0;
2543 }
2544
2545 #ifdef DEBUG
2546 int
2547 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2548 {
2549 off_t off;
2550 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2551 int mismatchcnt = 0;
2552 int logblklen = 1<<wch->wc_log_dev_bshift;
2553 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2554 void *scratch1 = wapbl_malloc(MAXBSIZE);
2555 void *scratch2 = wapbl_malloc(MAXBSIZE);
2556 int error = 0;
2557
2558 KDASSERT(wapbl_replay_isopen(wr));
2559
2560 off = wch->wc_tail;
2561 while (off != wch->wc_head) {
2562 struct wapbl_wc_null *wcn;
2563 #ifdef DEBUG
2564 off_t saveoff = off;
2565 #endif
2566 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2567 if (error)
2568 goto out;
2569 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2570 switch (wcn->wc_type) {
2571 case WAPBL_WC_BLOCKS:
2572 {
2573 struct wapbl_wc_blocklist *wc =
2574 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2575 int i;
2576 for (i = 0; i < wc->wc_blkcount; i++) {
2577 int foundcnt = 0;
2578 int dirtycnt = 0;
2579 int j, n;
2580 /*
2581 				 * Check each physical block against the
2582 				 * hashtable independently
2583 */
2584 n = wc->wc_blocks[i].wc_dlen >>
2585 wch->wc_fs_dev_bshift;
2586 for (j = 0; j < n; j++) {
2587 struct wapbl_blk *wb =
2588 wapbl_blkhash_get(wr,
2589 wc->wc_blocks[i].wc_daddr + j);
2590 if (wb && (wb->wb_off == off)) {
2591 foundcnt++;
2592 error =
2593 wapbl_circ_read(wr,
2594 scratch1, fsblklen,
2595 &off);
2596 if (error)
2597 goto out;
2598 error =
2599 wapbl_read(scratch2,
2600 fsblklen, fsdevvp,
2601 wb->wb_blk);
2602 if (error)
2603 goto out;
2604 if (memcmp(scratch1,
2605 scratch2,
2606 fsblklen)) {
2607 printf(
2608 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2609 wb->wb_blk, (intmax_t)off);
2610 dirtycnt++;
2611 mismatchcnt++;
2612 }
2613 } else {
2614 wapbl_circ_advance(wr,
2615 fsblklen, &off);
2616 }
2617 }
2618 #if 0
2619 /*
2620 * If all of the blocks in an entry
2621 * are clean, then remove all of its
2622 * blocks from the hashtable since they
2623 			 * will never need replay.
2624 */
2625 if ((foundcnt != 0) &&
2626 (dirtycnt == 0)) {
2627 off = saveoff;
2628 wapbl_circ_advance(wr,
2629 logblklen, &off);
2630 for (j = 0; j < n; j++) {
2631 struct wapbl_blk *wb =
2632 wapbl_blkhash_get(wr,
2633 wc->wc_blocks[i].wc_daddr + j);
2634 if (wb &&
2635 (wb->wb_off == off)) {
2636 wapbl_blkhash_rem(wr, wb->wb_blk);
2637 }
2638 wapbl_circ_advance(wr,
2639 fsblklen, &off);
2640 }
2641 }
2642 #endif
2643 }
2644 }
2645 break;
2646 case WAPBL_WC_REVOCATIONS:
2647 case WAPBL_WC_INODES:
2648 break;
2649 default:
2650 KASSERT(0);
2651 }
2652 #ifdef DEBUG
2653 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2654 KASSERT(off == saveoff);
2655 #endif
2656 }
2657 out:
2658 wapbl_free(scratch1);
2659 wapbl_free(scratch2);
2660 if (!error && mismatchcnt)
2661 error = EFTYPE;
2662 return error;
2663 }
2664 #endif
2665
2666 int
2667 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2668 {
2669 off_t off;
2670 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2671 int logblklen = 1<<wch->wc_log_dev_bshift;
2672 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2673 void *scratch1 = wapbl_malloc(MAXBSIZE);
2674 int error = 0;
2675
2676 KDASSERT(wapbl_replay_isopen(wr));
2677
2678 /*
2679 * This parses the journal for replay, although it could
2680 * just as easily walk the hashtable instead.
2681 */
2682
2683 off = wch->wc_tail;
2684 while (off != wch->wc_head) {
2685 struct wapbl_wc_null *wcn;
2686 #ifdef DEBUG
2687 off_t saveoff = off;
2688 #endif
2689 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2690 if (error)
2691 goto out;
2692 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2693 switch (wcn->wc_type) {
2694 case WAPBL_WC_BLOCKS:
2695 {
2696 struct wapbl_wc_blocklist *wc =
2697 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2698 int i;
2699 for (i = 0; i < wc->wc_blkcount; i++) {
2700 int j, n;
2701 /*
2702 * Check each physical block against
2703 * the hashtable independently
2704 */
2705 n = wc->wc_blocks[i].wc_dlen >>
2706 wch->wc_fs_dev_bshift;
2707 for (j = 0; j < n; j++) {
2708 struct wapbl_blk *wb =
2709 wapbl_blkhash_get(wr,
2710 wc->wc_blocks[i].wc_daddr + j);
2711 if (wb && (wb->wb_off == off)) {
2712 error = wapbl_circ_read(
2713 wr, scratch1,
2714 fsblklen, &off);
2715 if (error)
2716 goto out;
2717 error =
2718 wapbl_write(scratch1,
2719 fsblklen, fsdevvp,
2720 wb->wb_blk);
2721 if (error)
2722 goto out;
2723 } else {
2724 wapbl_circ_advance(wr,
2725 fsblklen, &off);
2726 }
2727 }
2728 }
2729 }
2730 break;
2731 case WAPBL_WC_REVOCATIONS:
2732 case WAPBL_WC_INODES:
2733 break;
2734 default:
2735 KASSERT(0);
2736 }
2737 #ifdef DEBUG
2738 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2739 KASSERT(off == saveoff);
2740 #endif
2741 }
2742 out:
2743 wapbl_free(scratch1);
2744 return error;
2745 }
2746
2747 int
2748 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2749 {
2750 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2751 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2752
2753 KDASSERT(wapbl_replay_isopen(wr));
2754
2755 KASSERT((len % fsblklen) == 0);
2756
2757 while (len != 0) {
2758 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2759 if (wb) {
2760 off_t off = wb->wb_off;
2761 int error;
2762 error = wapbl_circ_read(wr, data, fsblklen, &off);
2763 if (error)
2764 return error;
2765 }
2766 data = (uint8_t *)data + fsblklen;
2767 len -= fsblklen;
2768 blk++;
2769 }
2770 return 0;
2771 }
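
/*
 * Example: a hypothetical caller of wapbl_replay_read().  Read the
 * (possibly stale) blocks from the file system device first, then
 * overlay any newer copies that are still sitting in the log.  A
 * disabled sketch only; the ex_* name is not part of this file.
 */
#if 0
static int
ex_read_with_replay(struct wapbl_replay *wr, struct vnode *fsdevvp,
    void *buf, daddr_t blk, long len)
{
	int error;

	error = wapbl_read(buf, len, fsdevvp, blk);
	if (error)
		return error;
	/* patch in journalled copies of any blocks still in the log */
	return wapbl_replay_read(wr, buf, blk, len);
}
#endif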
2772