/*	$NetBSD: vfs_wapbl.c,v 1.13 2008/11/18 19:31:35 joerg Exp $	*/

/*-
 * Copyright (c) 2003,2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements file-system-independent write-ahead logging.
 */

#define WAPBL_INTERNAL

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.13 2008/11/18 19:31:35 joerg Exp $");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/wapbl.h>

#if WAPBL_UVM_ALLOC
#include <uvm/uvm.h>
#endif

#include <miscfs/specfs/specdev.h>

MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
#define wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
#define wapbl_free(a) free((a), M_WAPBL)
#define wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO)
#define wapbl_realloc(ptr, s) realloc((ptr), (s), M_WAPBL, M_WAITOK | M_ZERO)

#else /* !_KERNEL */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <sys/time.h>
#include <sys/wapbl.h>

#define KDASSERT(x) assert(x)
#define KASSERT(x) assert(x)
#define wapbl_malloc(s) malloc(s)
#define wapbl_free(a) free(a)
#define wapbl_calloc(n, s) calloc((n), (s))
#define wapbl_realloc(ptr, s) realloc((ptr), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
struct wapbl {
	struct vnode *wl_logvp;	/* r:	log here */
	struct vnode *wl_devvp;	/* r:	log on this device */
	struct mount *wl_mount;	/* r:	mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r:	Physical block number of start of log */
	int wl_log_dev_bshift;	/* r:	logarithm of device block size of log
					device */
	int wl_fs_dev_bshift;	/* r:	logarithm of device block size of
					filesystem device */

	unsigned wl_lock_count;	/* m:	Count of transactions in progress */

	size_t wl_circ_size;	/* r:	Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r:	Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r:	Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r:	Number of buf bytes reserved for log */

	off_t wl_head;		/* l:	Byte offset of log head */
	off_t wl_tail;		/* l:	Byte offset of log tail */
	/*
	 * head == tail == 0 means log is empty.
	 * head == tail != 0 means log is full.
	 * See assertions in wapbl_advance() for other boundary conditions.
	 *
	 * Only truncate moves the tail, except when flush sets it to
	 * wl_header_size; only flush moves the head, except when truncate
	 * sets it to 0.
	 */
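	/*
	 * Illustrative states (the numbers here are made up, not taken
	 * from the code): head = tail = 0 once everything is reclaimed;
	 * head = 8192 with tail = 1024 means offsets [1024, 8192) of the
	 * circular area hold committed but not yet reclaimed transactions.
	 */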

	struct wapbl_wc_header *wl_wc_header;	/* l	*/
	void *wl_wc_scratch;	/* l:	scratch space (XXX: why?!?) */

	kmutex_t wl_mtx;	/* u:	short-term lock */
	krwlock_t wl_rwlock;	/* u:	File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
#if _KERNEL
	wapbl_flush_fn_t wl_flush;	/* r	*/
	wapbl_flush_fn_t wl_flush_abort;/* r	*/
#endif

	size_t wl_bufbytes;	/* m:	Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m:	Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m:	Total bcount of wl_bufs */

	LIST_HEAD(, buf) wl_bufs; /* m:	Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes;	/* m:	Amount of space available for
						reclamation by truncate */
	int wl_error_count;	/* m:	# of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
#endif

	daddr_t *wl_deallocblks;/* l:	address of block */
	int *wl_dealloclens;	/* l:	size of block (fragments, remember) */
	int wl_dealloccnt;	/* l:	total count */
	int wl_dealloclim;	/* l:	max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
						   accounting */
};

#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
#endif /* _KERNEL */

static int wapbl_replay_process(struct wapbl_replay *wr);

static __inline size_t wapbl_space_free(size_t avail, off_t head,
	off_t tail);
static __inline size_t wapbl_space_used(size_t avail, off_t head,
	off_t tail);

#ifdef _KERNEL

#define WAPBL_INODETRK_SIZE 83
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

#if 0
int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
#endif

static int wapbl_replay_isopen1(struct wapbl_replay *);

/*
 * This is useful for debugging.  If set, the log will
 * only be truncated when necessary.
 */
int wapbl_lazy_truncate = 0;

struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard = wapbl_discard,
	.wo_wapbl_replay_isopen = wapbl_replay_isopen1,
	.wo_wapbl_replay_can_read = wapbl_replay_can_read,
	.wo_wapbl_replay_read = wapbl_replay_read,
	.wo_wapbl_add_buf = wapbl_add_buf,
	.wo_wapbl_remove_buf = wapbl_remove_buf,
	.wo_wapbl_resize_buf = wapbl_resize_buf,
	.wo_wapbl_begin = wapbl_begin,
	.wo_wapbl_end = wapbl_end,
	.wo_wapbl_junlock_assert = wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone = wapbl_biodone,
};

void
wapbl_init(void)
{

	malloc_type_attach(M_WAPBL);
}

int
wapbl_start(struct wapbl **wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = DEV_BSHIFT;
	int fs_dev_bshift = DEV_BSHIFT;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
		    ("wapbl: log device's block size cannot be larger "
		    "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return ENOSPC;
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	LIST_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of log_dev_bshift */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;

	/*
	 * wl_bufbytes_max limits the size of the in memory transaction space.
	 *  - Since buffers are allocated and accounted for in units of
	 *    PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *    (i.e. 1<<PAGE_SHIFT)
	 *  - Since the log device has to be written in units of
	 *    1<<wl_log_dev_bshift it is required to be a multiple of
	 *    1<<wl_log_dev_bshift.
	 *  - Since the filesystem will provide data in units of
	 *    1<<wl_fs_dev_bshift, it is convenient for it to be a multiple
	 *    of 1<<wl_fs_dev_bshift.
	 * Therefore it must be a multiple of the least common multiple of
	 * those three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */
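	/*
	 * For example (illustrative values only): with PAGE_SHIFT = 12,
	 * wl_log_dev_bshift = 9 and wl_fs_dev_bshift = 12, the largest of
	 * the three unit sizes is 1<<12, so the shift pairs below round
	 * wl_bufbytes_max down to a 4096-byte boundary, which is also the
	 * least common multiple of the three units.
	 */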

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (nbuf / 2) * 1024;

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);

#if WAPBL_UVM_ALLOC
	wl->wl_deallocblks = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	KASSERT(wl->wl_deallocblks != NULL);
	wl->wl_dealloclens = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
	KASSERT(wl->wl_dealloclens != NULL);
#else
	wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
	    wl->wl_dealloclim);
	wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
	    wl->wl_dealloclim);
#endif

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1<<wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_malloc(len);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		int i;

		WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
		    ("wapbl_start: reusing log with %d inodes\n",
		    wr->wr_inodescnt));

		/*
		 * It's only valid to reuse the replay log if it's
		 * the same as the new log we just opened.
		 */
		KDASSERT(!wapbl_replay_isopen(wr));
		KASSERT(devvp->v_rdev == wr->wr_devvp->v_rdev);
		KASSERT(logpbn == wr->wr_logpbn);
		KASSERT(wl->wl_circ_size == wr->wr_wc_header.wc_circ_size);
		KASSERT(wl->wl_circ_off == wr->wr_wc_header.wc_circ_off);
		KASSERT(wl->wl_log_dev_bshift ==
		    wr->wr_wc_header.wc_log_dev_bshift);
		KASSERT(wl->wl_fs_dev_bshift ==
		    wr->wr_wc_header.wc_fs_dev_bshift);

		wl->wl_wc_header->wc_generation =
		    wr->wr_wc_header.wc_generation + 1;

		for (i = 0; i < wr->wr_inodescnt; i++)
			wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
			    wr->wr_inodes[i].wr_imode);

		/* Make sure new transaction won't overwrite old inodes list */
		KDASSERT(wapbl_transaction_len(wl) <=
		    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
		    wr->wr_inodestail));

		wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
		wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
		    wapbl_transaction_len(wl);

		error = wapbl_write_inodes(wl, &wl->wl_head);
		if (error)
			goto errout;

		KASSERT(wl->wl_head != wl->wl_tail);
		KASSERT(wl->wl_head != 0);
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);
	wapbl_free(wl);

	return error;
}

/*
 * Like wapbl_flush, only discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

#ifdef WAPBL_DEBUG_PRINT
	{
		struct wapbl_entry *we;
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}
	}

	/* Discard list of deallocs */
	wl->wl_dealloccnt = 0;
	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	struct vnode *vp;
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return EBUSY;
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	vp = wl->wl_logvp;

	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl);

	return 0;
}

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
	struct buf *bp;
	int error;

	KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
	KASSERT(devvp->v_type == VBLK);

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(&devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(&devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY;	/* silly & dubious */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%x failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}

/*
 * off is a byte offset; returns the new offset for the next write.
 * Handles log wraparound.
 */
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;

	KDASSERT(((len >> wl->wl_log_dev_bshift) <<
	    wl->wl_log_dev_bshift) == len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		error = wapbl_write(data, slen, wl->wl_devvp,
		    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	error = wapbl_write(data, len, wl->wl_devvp,
	    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}
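
/*
 * A worked example of the wraparound above (illustrative values only):
 * with wl_circ_off = 1024 and wl_circ_size = 8192, the circular area is
 * [1024, 9216).  A 2048-byte write starting at off = 8704 is split into
 * slen = 512 bytes at offset 8704 and the remaining 1536 bytes at
 * wl_circ_off = 1024, leaving *offp = 2560.
 */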

/****************************************************************/

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;
	krw_t op;

	KDASSERT(wl);

	/*
	 * XXX: The original code calls for the use of a RW_READER lock
	 * here, but it turns out there are performance issues with high
	 * metadata-rate workloads (e.g. multiple simultaneous tar
	 * extractions).  For now, we force the lock to be RW_WRITER,
	 * since that currently has the best performance characteristics
	 * (even for a single tar-file extraction).
	 */
#define WAPBL_DEBUG_SERIALIZE 1

#ifdef WAPBL_DEBUG_SERIALIZE
	op = RW_WRITER;
#else
	op = RW_READER;
#endif

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
		   wl->wl_bufbytes_max / 2) ||
		  ((wl->wl_bufcount + (lockcount * 10)) >
		   wl->wl_bufcount_max / 2) ||
		  (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
	mutex_exit(&wl->wl_mtx);

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, op);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}

void
wapbl_add_buf(struct wapbl *wl, struct buf *bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		LIST_REMOVE(bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked but dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl *wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	    ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	LIST_REMOVE(bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl *wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */

/****************************************************************/
/* Some utility inlines */

/* This is used to advance the pointer at old to new value at old+delta */
static __inline off_t
wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
{
	off_t new;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= size);
	KASSERT((old == 0) || (old >= off));
	KASSERT(old < (size + off));

	if ((old == 0) && (delta != 0))
		new = off + delta;
	else if ((old + delta) < (size + off))
		new = old + delta;
	else
		new = (old + delta) - size;

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (new == old));
	KASSERT((delta == 0) || (new != 0));
	KASSERT((delta != (size)) || (new == old));

	/* Define acceptable ranges for output. */
	KASSERT((new == 0) || (new >= off));
	KASSERT(new < (size + off));
	return new;
}
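
/*
 * Worked example (illustrative values only): with size = 100 and off = 10,
 * valid nonzero offsets lie in [10, 110).  Advancing old = 105 by
 * delta = 20 gives 105 + 20 - 100 = 25, wrapping past the end of the
 * range; advancing old = 0 (the empty-log marker) by a nonzero delta
 * starts the pointer at off + delta.
 */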

static __inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail == 0) {
		KASSERT(head == 0);
		return 0;
	}
	return ((head + (avail - 1) - tail) % avail) + 1;
}
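
/*
 * Example (illustrative values only): avail = 100, tail = 30, head = 50
 * gives ((50 + 99 - 30) % 100) + 1 = 20 bytes used, while
 * head == tail != 0 gives ((head + 99 - head) % 100) + 1 = 100, a full
 * log, matching the head/tail conventions noted in struct wapbl.
 */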

static __inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{

	return avail - wapbl_space_used(avail, head, tail);
}

static __inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_free(size, head, tail));
	head = wapbl_advance(size, off, head, delta);
	if ((tail == 0) && (head != 0))
		tail = off;
	*headp = head;
	*tailp = tail;
}

static __inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_used(size, head, tail));
	tail = wapbl_advance(size, off, tail, delta);
	if (head == tail) {
		head = tail = 0;
	}
	*headp = head;
	*tailp = tail;
}

#ifdef _KERNEL

/****************************************************************/

/*
 * Remove transactions whose buffers are completely flushed to disk.
 * Will block until at least minfree space is available.
 * Only intended to be called from inside wapbl_flush and therefore
 * does not protect against commit races with itself or with flush.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
	    wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	if (waitonly)
		return 0;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush.  This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}

/****************************************************************/

void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;

	/*
	 * Handle possible flushing of buffers after the log has been
	 * decommissioned.
	 */
	if (!wl) {
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
		we->we_unsynced_bufbytes -= bp->b_bufsize;
#endif

		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_flags & B_DONE);
	KDASSERT(!(bp->b_flags & B_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_flags & B_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_flags & B_INVAL));
	KDASSERT(!(bp->b_flags & B_NOCACHE));
#endif

	if (bp->b_error) {
#ifdef notyet /* Can't currently handle possible dirty buffer reuse */
	XXXpooka: interfaces not fully updated
	Note: this was not enabled in the original patch
	against netbsd4 either.  I don't know if comment
	above is true or not.

		/*
		 * If an error occurs, report the error and leave the
		 * buffer as a delayed write on the LRU queue.
		 * restarting the write would likely result in
		 * an error spinloop, so let it be done harmlessly
		 * by the syncer.
		 */
		bp->b_flags &= ~(B_DONE);
		simple_unlock(&bp->b_interlock);

		if (we->we_error == 0) {
			mutex_enter(&wl->wl_mtx);
			wl->wl_error_count++;
			mutex_exit(&wl->wl_mtx);
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		we->we_error = bp->b_error;
		bp->b_error = 0;
		brelse(bp);
		return;
#else
		/* For now, just mark the log permanently errored out */

		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
#endif
	}

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
	we->we_unsynced_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
	wl->wl_unsynced_bufbytes -= bp->b_bufsize;
#endif

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions.  If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		    (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			wapbl_free(we);
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
	brelse(bp, 0);
}

/*
 * Write transactions to disk + start I/O for contents
 */
int
wapbl_flush(struct wapbl *wl, int waitfor)
{
	struct buf *bp;
	struct wapbl_entry *we;
	off_t off;
	off_t head;
	off_t tail;
	size_t delta = 0;
	size_t flushsize;
	size_t reserved;
	int error = 0;

	/*
	 * Do a quick check to see if a full flush can be skipped.
	 * This assumes that the flush callback does not need to be called
	 * unless there are other outstanding bufs.
	 */
	if (!waitfor) {
		size_t nbufs;
		mutex_enter(&wl->wl_mtx);	/* XXX need mutex here to
						   protect the KASSERTS */
		nbufs = wl->wl_bufcount;
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
		mutex_exit(&wl->wl_mtx);
		if (nbufs == 0)
			return 0;
	}

	/*
	 * XXX we may consider using LK_UPGRADE here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

	/*
	 * Now that we are fully locked and flushed,
	 * do another check for nothing to do.
	 */
	if (wl->wl_bufcount == 0) {
		goto out;
	}

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d flushing entries with "
	    "bufcount=%zu bufbytes=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes));
#endif

	/* Calculate amount of space needed to flush */
	flushsize = wapbl_transaction_len(wl);

	if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
		/*
		 * XXX this could be handled more gracefully, perhaps place
		 * only a partial transaction in the log and allow the
		 * remaining to flush without the protection of the journal.
		 */
		panic("wapbl_flush: current transaction too big to flush\n");
	}

	error = wapbl_truncate(wl, flushsize, 0);
	if (error)
		goto out2;

	off = wl->wl_head;
	KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
	    (off < wl->wl_circ_off + wl->wl_circ_size)));
	error = wapbl_write_blocks(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_revocations(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_inodes(wl, &off);
	if (error)
		goto out2;

	reserved = 0;
	if (wl->wl_inohashcnt)
		reserved = wapbl_transaction_inodes_len(wl);

	head = wl->wl_head;
	tail = wl->wl_tail;

	wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
	    &head, &tail);
#ifdef WAPBL_DEBUG
	if (head != off) {
		panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
		      " off=%"PRIdMAX" flush=%zu\n",
		      (intmax_t)head, (intmax_t)tail, (intmax_t)off,
		      flushsize);
	}
#else
	KASSERT(head == off);
#endif

	/* Opportunistically move the tail forward if we can */
	if (!wapbl_lazy_truncate) {
		mutex_enter(&wl->wl_mtx);
		delta = wl->wl_reclaimable_bytes;
		mutex_exit(&wl->wl_mtx);
		wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
		    &head, &tail);
	}

	error = wapbl_write_commit(wl, head, tail);
	if (error)
		goto out2;

	/* poolme? or kmemme? */
	we = wapbl_calloc(1, sizeof(*we));

#ifdef WAPBL_DEBUG_BUFBYTES
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
	    " unsynced=%zu"
	    "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
	    "inodes=%d\n",
	    curproc->p_pid, curlwp->l_lid, flushsize, delta,
	    wapbl_space_used(wl->wl_circ_size, head, tail),
	    wl->wl_unsynced_bufbytes, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
	    wl->wl_inohashcnt));
#else
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
	    "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
	    "inodes=%d\n",
	    curproc->p_pid, curlwp->l_lid, flushsize, delta,
	    wapbl_space_used(wl->wl_circ_size, head, tail),
	    wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
	    wl->wl_dealloccnt, wl->wl_inohashcnt));
#endif

	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);

	wl->wl_reserved_bytes = reserved;
	wl->wl_head = head;
	wl->wl_tail = tail;
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	wl->wl_dealloccnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
	wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
#endif

	we->we_wapbl = wl;
	we->we_bufcount = wl->wl_bufcount;
#ifdef WAPBL_DEBUG_BUFBYTES
	we->we_unsynced_bufbytes = wl->wl_bufbytes;
#endif
	we->we_reclaimable_bytes = flushsize;
	we->we_error = 0;
	SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);

	/*
	 * This flushes bufs in the reverse order from that in which they
	 * were queued.  It shouldn't matter, but if we cared we could use
	 * a TAILQ instead.
	 * XXX Note they will get put on the lru queue when they flush,
	 * so we might actually want to change this to preserve order.
	 */
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
			continue;
		}
		bp->b_iodone = wapbl_biodone;
		bp->b_private = we;
		bremfree(bp);
		wapbl_remove_buf_locked(wl, bp);
		mutex_exit(&wl->wl_mtx);
		mutex_exit(&bufcache_lock);
		bawrite(bp);
		mutex_enter(&bufcache_lock);
		mutex_enter(&wl->wl_mtx);
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d done flushing entries...\n",
	    curproc->p_pid, curlwp->l_lid));
#endif

 out:

	/*
	 * If the waitfor flag is set, don't return until everything is
	 * fully flushed and the on disk log is empty.
	 */
	if (waitfor) {
		error = wapbl_truncate(wl, wl->wl_circ_size -
		    wl->wl_reserved_bytes, wapbl_lazy_truncate);
	}

 out2:
	if (error) {
		wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
		    wl->wl_dealloclens, wl->wl_dealloccnt);
	}

#ifdef WAPBL_DEBUG_PRINT
	if (error) {
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
		mutex_enter(&wl->wl_mtx);
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n", we->we_bufcount,
			    we->we_reclaimable_bytes, we->we_error));
		}
#endif
		mutex_exit(&wl->wl_mtx);
	}
#endif

	rw_exit(&wl->wl_rwlock);
	return error;
}

/****************************************************************/

void
wapbl_jlock_assert(struct wapbl *wl)
{

#ifdef WAPBL_DEBUG_SERIALIZE
	KASSERT(rw_write_held(&wl->wl_rwlock));
#else
	KASSERT(rw_read_held(&wl->wl_rwlock) || rw_write_held(&wl->wl_rwlock));
#endif
}

void
wapbl_junlock_assert(struct wapbl *wl)
{

#ifdef WAPBL_DEBUG_SERIALIZE
	KASSERT(!rw_write_held(&wl->wl_rwlock));
#endif
}

/****************************************************************/

/* locks missing */
void
wapbl_print(struct wapbl *wl,
		int full,
		void (*pr)(const char *, ...))
{
	struct buf *bp;
	struct wapbl_entry *we;
	(*pr)("wapbl %p", wl);
	(*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
	      wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
	(*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
	      wl->wl_circ_size, wl->wl_circ_off,
	      (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
	(*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
	      wl->wl_fs_dev_bshift, wl->wl_log_dev_bshift);
#ifdef WAPBL_DEBUG_BUFBYTES
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	      "reserved = %zu errcnt = %d unsynced = %zu\n",
	      wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
	      wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	      wl->wl_error_count, wl->wl_unsynced_bufbytes);
#else
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	      "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
	      wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	      wl->wl_error_count);
#endif
	(*pr)("\tdealloccnt = %d, dealloclim = %d\n",
	      wl->wl_dealloccnt, wl->wl_dealloclim);
	(*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
	      wl->wl_inohashcnt, wl->wl_inohashmask);
	(*pr)("entries:\n");
	SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
#ifdef WAPBL_DEBUG_BUFBYTES
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
		      "unsynced = %zu\n",
		      we->we_bufcount, we->we_reclaimable_bytes,
		      we->we_error, we->we_unsynced_bufbytes);
#else
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
		      we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
#endif
	}
	if (full) {
		int cnt = 0;
		(*pr)("bufs =");
		LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
			if (!LIST_NEXT(bp, b_wapbllist)) {
				(*pr)(" %p", bp);
			} else if ((++cnt % 6) == 0) {
				(*pr)(" %p,\n\t", bp);
			} else {
				(*pr)(" %p,", bp);
			}
		}
		(*pr)("\n");

		(*pr)("dealloced blks = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i < wl->wl_dealloccnt; i++) {
				(*pr)(" %"PRId64":%d,",
				      wl->wl_deallocblks[i],
				      wl->wl_dealloclens[i]);
				if ((++cnt % 4) == 0) {
					(*pr)("\n\t");
				}
			}
		}
		(*pr)("\n");

		(*pr)("registered inodes = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i <= wl->wl_inohashmask; i++) {
				struct wapbl_ino_head *wih;
				struct wapbl_ino *wi;

				wih = &wl->wl_inohash[i];
				LIST_FOREACH(wi, wih, wi_hash) {
					if (wi->wi_ino == 0)
						continue;
					(*pr)(" %"PRId32"/0%06"PRIo32",",
					    wi->wi_ino, wi->wi_mode);
					if ((++cnt % 4) == 0) {
						(*pr)("\n\t");
					}
				}
			}
			(*pr)("\n");
		}
	}
}

#if defined(WAPBL_DEBUG) || defined(DDB)
void
wapbl_dump(struct wapbl *wl)
{
#if defined(WAPBL_DEBUG)
	if (!wl)
		wl = wapbl_debug_wl;
#endif
	if (!wl)
		return;
	wapbl_print(wl, 1, printf);
}
#endif

/****************************************************************/

void
wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
{

	wapbl_jlock_assert(wl);

	/* XXX should eventually instead tie this into resource estimation */
	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
	wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
	wl->wl_dealloclens[wl->wl_dealloccnt] = len;
	wl->wl_dealloccnt++;
	WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
	    ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
}

/****************************************************************/

static void
wapbl_inodetrk_init(struct wapbl *wl, u_int size)
{

	wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
	if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
		pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
		    "wapblinopl", &pool_allocator_nointr, IPL_NONE);
	}
}

static void
wapbl_inodetrk_free(struct wapbl *wl)
{

	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_inohashcnt == 0);
	hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
	if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
		pool_destroy(&wapbl_ino_pool);
	}
}

static struct wapbl_ino *
wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	KASSERT(mutex_owned(&wl->wl_mtx));

	wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
	LIST_FOREACH(wi, wih, wi_hash) {
		if (ino == wi->wi_ino)
			return wi;
	}
	return 0;
}

void
wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	wi = pool_get(&wapbl_ino_pool, PR_WAITOK);

	mutex_enter(&wl->wl_mtx);
	if (wapbl_inodetrk_get(wl, ino) == NULL) {
		wi->wi_ino = ino;
		wi->wi_mode = mode;
		wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
		LIST_INSERT_HEAD(wih, wi, wi_hash);
		wl->wl_inohashcnt++;
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_register_inode: ino=%"PRId64"\n", ino));
		mutex_exit(&wl->wl_mtx);
	} else {
		mutex_exit(&wl->wl_mtx);
		pool_put(&wapbl_ino_pool, wi);
	}
}

void
wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino *wi;

	mutex_enter(&wl->wl_mtx);
	wi = wapbl_inodetrk_get(wl, ino);
	if (wi) {
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
		KASSERT(wl->wl_inohashcnt > 0);
		wl->wl_inohashcnt--;
		LIST_REMOVE(wi, wi_hash);
		mutex_exit(&wl->wl_mtx);

		pool_put(&wapbl_ino_pool, wi);
	} else {
		mutex_exit(&wl->wl_mtx);
	}
}

/****************************************************************/

static __inline size_t
wapbl_transaction_inodes_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int iph;

	/* Calculate number of inodes described in an inodelist header */
	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);

	KASSERT(iph > 0);

	return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
}
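
/*
 * Illustrative example (the entry size here is an assumption, not
 * normative): with a 512-byte log block and 8-byte wc_inodes[] entries,
 * iph comes out a bit under 64, so 100 registered inodes need two
 * inodelist blocks; the MAX(1, ...) means even an empty inode list
 * costs one block.
 */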


/* Calculate amount of space a transaction will take on disk */
static size_t
wapbl_transaction_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	size_t len;
	int bph;

	/* Calculate number of blocks described in a blocklist header */
	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	KASSERT(bph > 0);

	len = wl->wl_bcount;
	len += howmany(wl->wl_bufcount, bph)*blocklen;
	len += howmany(wl->wl_dealloccnt, bph)*blocklen;
	len += wapbl_transaction_inodes_len(wl);

	return len;
}
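
/*
 * Example (illustrative values only): a transaction of 10 buffers
 * totalling bcount = 40960 bytes, with blocklen = 512 and bph large
 * enough that one blocklist header describes all 10 buffers, takes
 * 40960 + 512 + 0 (no deallocs) + 512 (minimum inodelist) = 41984
 * bytes of journal space.
 */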

/*
 * Perform commit operation
 *
 * Note that generation number incrementation needs to
 * be protected against racing with other invocations
 * of wapbl_commit.  This is ok since this routine
 * is only invoked from wapbl_flush
 */
static int
wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
{
	struct wapbl_wc_header *wc = wl->wl_wc_header;
	struct timespec ts;
	int error;
	int force = 1;

	/* XXX Calc checksum here, instead we do this for now */
	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	wc->wc_head = head;
	wc->wc_tail = tail;
	wc->wc_checksum = 0;
	wc->wc_version = 1;
	getnanotime(&ts);
	wc->wc_time = ts.tv_sec;
	wc->wc_timensec = ts.tv_nsec;

	WAPBL_PRINTF(WAPBL_PRINT_WRITE,
	    ("wapbl_write_commit: head = %"PRIdMAX" tail = %"PRIdMAX"\n",
	    (intmax_t)head, (intmax_t)tail));
1879
1880 /*
1881 * XXX if generation will rollover, then first zero
1882 * over second commit header before trying to write both headers.
1883 */
1884
1885 error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
1886 wl->wl_logpbn + wc->wc_generation % 2);
1887 if (error)
1888 return error;
1889
1890 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
1891 if (error) {
1892 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1893 ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
1894 "returned %d\n", wl->wl_devvp->v_rdev, error));
1895 }
1896
1897 /*
1898 * If the generation number was zero, write it out a second time.
1899 * This handles initialization and generation number rollover
1900 */
1901 if (wc->wc_generation++ == 0) {
1902 error = wapbl_write_commit(wl, head, tail);
1903 		/*
1904 		 * This panic could be removed if we did the zeroing
1905 		 * mentioned above and were certain to roll back the
1906 		 * generation number on failure.
1907 		 */
1908 if (error)
1909 panic("wapbl_write_commit: error writing duplicate "
1910 "log header: %d\n", error);
1911 }
1912 return 0;
1913 }
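
/*
 * The commit header alternates between the first two blocks of the
 * log area (wl_logpbn + wc_generation % 2).  wapbl_replay_start()
 * reads both copies and trusts whichever carries the larger
 * generation number, so a torn write of one commit header can only
 * clobber the older of the two copies.
 */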
1914
1915 /* Write the transaction's buffers to the log; update *offp to the new offset */
1916 static int
1917 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
1918 {
1919 struct wapbl_wc_blocklist *wc =
1920 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1921 int blocklen = 1<<wl->wl_log_dev_bshift;
1922 int bph;
1923 struct buf *bp;
1924 off_t off = *offp;
1925 int error;
1926 size_t padding;
1927
1928 KASSERT(rw_write_held(&wl->wl_rwlock));
1929
1930 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1931 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1932
1933 bp = LIST_FIRST(&wl->wl_bufs);
1934
1935 while (bp) {
1936 int cnt;
1937 struct buf *obp = bp;
1938
1939 KASSERT(bp->b_flags & B_LOCKED);
1940
1941 wc->wc_type = WAPBL_WC_BLOCKS;
1942 wc->wc_len = blocklen;
1943 wc->wc_blkcount = 0;
1944 while (bp && (wc->wc_blkcount < bph)) {
1945 /*
1946 * Make sure all the physical block numbers are up to
1947 * date. If this is not always true on a given
1948 * filesystem, then VOP_BMAP must be called. We
1949 * could call VOP_BMAP here, or else in the filesystem
1950 * specific flush callback, although neither of those
1951 			 * solutions allows us to take the vnode lock.  If a
1952 * filesystem requires that we must take the vnode lock
1953 * to call VOP_BMAP, then we can probably do it in
1954 * bwrite when the vnode lock should already be held
1955 * by the invoking code.
1956 */
1957 KASSERT((bp->b_vp->v_type == VBLK) ||
1958 (bp->b_blkno != bp->b_lblkno));
1959 KASSERT(bp->b_blkno > 0);
1960
1961 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
1962 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
1963 wc->wc_len += bp->b_bcount;
1964 wc->wc_blkcount++;
1965 bp = LIST_NEXT(bp, b_wapbllist);
1966 }
1967 if (wc->wc_len % blocklen != 0) {
1968 padding = blocklen - wc->wc_len % blocklen;
1969 wc->wc_len += padding;
1970 } else {
1971 padding = 0;
1972 }
1973
1974 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1975 ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
1976 wc->wc_len, padding, (intmax_t)off));
1977
1978 error = wapbl_circ_write(wl, wc, blocklen, &off);
1979 if (error)
1980 return error;
1981 bp = obp;
1982 cnt = 0;
1983 while (bp && (cnt++ < bph)) {
1984 error = wapbl_circ_write(wl, bp->b_data,
1985 bp->b_bcount, &off);
1986 if (error)
1987 return error;
1988 bp = LIST_NEXT(bp, b_wapbllist);
1989 }
1990 if (padding) {
1991 void *zero;
1992
1993 zero = wapbl_malloc(padding);
1994 memset(zero, 0, padding);
1995 error = wapbl_circ_write(wl, zero, padding, &off);
1996 wapbl_free(zero);
1997 if (error)
1998 return error;
1999 }
2000 }
2001 *offp = off;
2002 return 0;
2003 }
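
/*
 * Worked example of the record layout above (hypothetical sizes):
 * with blocklen = 512 and two buffers of 8192 and 300 bytes, the
 * record is one 512 byte blocklist header followed by the raw buffer
 * data, so wc_len = 512 + 8192 + 300 = 9004, padded with 212 zero
 * bytes up to the next blocklen multiple, 9216.
 */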
2004
2005 static int
2006 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
2007 {
2008 struct wapbl_wc_blocklist *wc =
2009 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2010 int i;
2011 int blocklen = 1<<wl->wl_log_dev_bshift;
2012 int bph;
2013 off_t off = *offp;
2014 int error;
2015
2016 if (wl->wl_dealloccnt == 0)
2017 return 0;
2018
2019 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
2020 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
2021
2022 i = 0;
2023 while (i < wl->wl_dealloccnt) {
2024 wc->wc_type = WAPBL_WC_REVOCATIONS;
2025 wc->wc_len = blocklen;
2026 wc->wc_blkcount = 0;
2027 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
2028 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2029 wl->wl_deallocblks[i];
2030 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2031 wl->wl_dealloclens[i];
2032 wc->wc_blkcount++;
2033 i++;
2034 }
2035 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2036 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2037 wc->wc_len, (intmax_t)off));
2038 error = wapbl_circ_write(wl, wc, blocklen, &off);
2039 if (error)
2040 return error;
2041 }
2042 *offp = off;
2043 return 0;
2044 }
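
/*
 * Revocation records list the blocks deallocated during this
 * transaction; wapbl_replay_process_revocations() later drops them
 * from the replay block hash, presumably so that stale journal
 * copies are never written over blocks that were freed (and possibly
 * reused) later in the log.
 */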
2045
2046 static int
2047 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2048 {
2049 struct wapbl_wc_inodelist *wc =
2050 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2051 int i;
2052 int blocklen = 1<<wl->wl_log_dev_bshift;
2053 off_t off = *offp;
2054 int error;
2055
2056 struct wapbl_ino_head *wih;
2057 struct wapbl_ino *wi;
2058 int iph;
2059
2060 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2061 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2062
2063 i = 0;
2064 wih = &wl->wl_inohash[0];
2065 wi = 0;
2066 do {
2067 wc->wc_type = WAPBL_WC_INODES;
2068 wc->wc_len = blocklen;
2069 wc->wc_inocnt = 0;
2070 wc->wc_clear = (i == 0);
2071 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2072 while (!wi) {
2073 KASSERT((wih - &wl->wl_inohash[0])
2074 <= wl->wl_inohashmask);
2075 wi = LIST_FIRST(wih++);
2076 }
2077 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2078 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2079 wc->wc_inocnt++;
2080 i++;
2081 wi = LIST_NEXT(wi, wi_hash);
2082 }
2083 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2084 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2085 wc->wc_len, (intmax_t)off));
2086 error = wapbl_circ_write(wl, wc, blocklen, &off);
2087 if (error)
2088 return error;
2089 } while (i < wl->wl_inohashcnt);
2090
2091 *offp = off;
2092 return 0;
2093 }
2094
2095 #endif /* _KERNEL */
2096
2097 /****************************************************************/
2098
2099 #ifdef _KERNEL
2100 static struct pool wapbl_blk_pool;
2101 static int wapbl_blk_pool_refcount;
2102 #endif
2103 struct wapbl_blk {
2104 LIST_ENTRY(wapbl_blk) wb_hash;
2105 daddr_t wb_blk;
2106 off_t wb_off; /* Offset of this block in the log */
2107 };
2108 #define WAPBL_BLKPOOL_MIN 83
2109
2110 static void
2111 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2112 {
2113 if (size < WAPBL_BLKPOOL_MIN)
2114 size = WAPBL_BLKPOOL_MIN;
2115 KASSERT(wr->wr_blkhash == 0);
2116 #ifdef _KERNEL
2117 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2118 if (atomic_inc_uint_nv(&wapbl_blk_pool_refcount) == 1) {
2119 pool_init(&wapbl_blk_pool, sizeof(struct wapbl_blk), 0, 0, 0,
2120 "wapblblkpl", &pool_allocator_nointr, IPL_NONE);
2121 }
2122 #else /* ! _KERNEL */
2123 /* Manually implement hashinit */
2124 {
2125 int i;
2126 unsigned long hashsize;
2127 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2128 continue;
2129 		wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
2130 		wr->wr_blkhashmask = hashsize - 1;
2131 		for (i = 0; i <= wr->wr_blkhashmask; i++)
2132 			LIST_INIT(&wr->wr_blkhash[i]);
2133 }
2134 #endif /* ! _KERNEL */
2135 }
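
/*
 * Both the hashinit() path and the manual userland path size the
 * table to a power of two so buckets can be selected by masking:
 * e.g. a requested size of 100 yields 128 buckets and
 * wr_blkhashmask = 0x7f.
 */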
2136
2137 static void
2138 wapbl_blkhash_free(struct wapbl_replay *wr)
2139 {
2140 KASSERT(wr->wr_blkhashcnt == 0);
2141 #ifdef _KERNEL
2142 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2143 if (atomic_dec_uint_nv(&wapbl_blk_pool_refcount) == 0) {
2144 pool_destroy(&wapbl_blk_pool);
2145 }
2146 #else /* ! _KERNEL */
2147 wapbl_free(wr->wr_blkhash);
2148 #endif /* ! _KERNEL */
2149 }
2150
2151 static struct wapbl_blk *
2152 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2153 {
2154 struct wapbl_blk_head *wbh;
2155 struct wapbl_blk *wb;
2156 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2157 LIST_FOREACH(wb, wbh, wb_hash) {
2158 if (blk == wb->wb_blk)
2159 return wb;
2160 }
2161 return 0;
2162 }
2163
2164 static void
2165 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2166 {
2167 struct wapbl_blk_head *wbh;
2168 struct wapbl_blk *wb;
2169 wb = wapbl_blkhash_get(wr, blk);
2170 if (wb) {
2171 KASSERT(wb->wb_blk == blk);
2172 wb->wb_off = off;
2173 } else {
2174 #ifdef _KERNEL
2175 wb = pool_get(&wapbl_blk_pool, PR_WAITOK);
2176 #else /* ! _KERNEL */
2177 wb = wapbl_malloc(sizeof(*wb));
2178 #endif /* ! _KERNEL */
2179 wb->wb_blk = blk;
2180 wb->wb_off = off;
2181 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2182 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2183 wr->wr_blkhashcnt++;
2184 }
2185 }
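
/*
 * Note that inserting a block that is already present only updates
 * its wb_off, so after the whole log has been scanned each block
 * maps to the offset of its most recent copy and replay writes only
 * the final contents of every block.
 */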
2186
2187 static void
2188 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2189 {
2190 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2191 if (wb) {
2192 KASSERT(wr->wr_blkhashcnt > 0);
2193 wr->wr_blkhashcnt--;
2194 LIST_REMOVE(wb, wb_hash);
2195 #ifdef _KERNEL
2196 pool_put(&wapbl_blk_pool, wb);
2197 #else /* ! _KERNEL */
2198 wapbl_free(wb);
2199 #endif /* ! _KERNEL */
2200 }
2201 }
2202
2203 static void
2204 wapbl_blkhash_clear(struct wapbl_replay *wr)
2205 {
2206 int i;
2207 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2208 struct wapbl_blk *wb;
2209
2210 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2211 KASSERT(wr->wr_blkhashcnt > 0);
2212 wr->wr_blkhashcnt--;
2213 LIST_REMOVE(wb, wb_hash);
2214 #ifdef _KERNEL
2215 pool_put(&wapbl_blk_pool, wb);
2216 #else /* ! _KERNEL */
2217 wapbl_free(wb);
2218 #endif /* ! _KERNEL */
2219 }
2220 }
2221 KASSERT(wr->wr_blkhashcnt == 0);
2222 }
2223
2224 /****************************************************************/
2225
2226 static int
2227 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2228 {
2229 size_t slen;
2230 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2231 off_t off = *offp;
2232 int error;
2233
2234 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2235 wc->wc_log_dev_bshift) == len);
2236 if (off < wc->wc_circ_off)
2237 off = wc->wc_circ_off;
2238 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2239 if (slen < len) {
2240 error = wapbl_read(data, slen, wr->wr_devvp,
2241 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2242 if (error)
2243 return error;
2244 data = (uint8_t *)data + slen;
2245 len -= slen;
2246 off = wc->wc_circ_off;
2247 }
2248 error = wapbl_read(data, len, wr->wr_devvp,
2249 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2250 if (error)
2251 return error;
2252 off += len;
2253 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2254 off = wc->wc_circ_off;
2255 *offp = off;
2256 return 0;
2257 }
2258
2259 static void
2260 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2261 {
2262 size_t slen;
2263 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2264 off_t off = *offp;
2265
2266 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2267 wc->wc_log_dev_bshift) == len);
2268
2269 if (off < wc->wc_circ_off)
2270 off = wc->wc_circ_off;
2271 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2272 if (slen < len) {
2273 len -= slen;
2274 off = wc->wc_circ_off;
2275 }
2276 off += len;
2277 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2278 off = wc->wc_circ_off;
2279 *offp = off;
2280 }
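
/*
 * Wraparound example for the two helpers above (hypothetical
 * geometry): with wc_circ_off = 1024 and wc_circ_size = 8192 the
 * log occupies [1024, 9216).  Reading 2048 bytes starting at
 * off = 8704 splits into 512 bytes at the end of the region and
 * 1536 bytes from wc_circ_off, leaving *offp = 2560.
 */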
2281
2282 /****************************************************************/
2283
2284 int
2285 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2286 daddr_t off, size_t count, size_t blksize)
2287 {
2288 struct wapbl_replay *wr;
2289 int error;
2290 struct vnode *devvp;
2291 daddr_t logpbn;
2292 uint8_t *scratch;
2293 struct wapbl_wc_header *wch;
2294 struct wapbl_wc_header *wch2;
2295 /* Use this until we read the actual log header */
2296 int log_dev_bshift = DEV_BSHIFT;
2297 size_t used;
2298
2299 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2300 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2301 vp, off, count, blksize));
2302
2303 if (off < 0)
2304 return EINVAL;
2305
2306 if (blksize < DEV_BSIZE)
2307 return EINVAL;
2308 if (blksize % DEV_BSIZE)
2309 return EINVAL;
2310
2311 #ifdef _KERNEL
2312 #if 0
2313 /* XXX vp->v_size isn't reliably set for VBLK devices,
2314 * especially root. However, we might still want to verify
2315 * that the full load is readable */
2316 if ((off + count) * blksize > vp->v_size)
2317 return EINVAL;
2318 #endif
2319
2320 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2321 return error;
2322 }
2323 #else /* ! _KERNEL */
2324 devvp = vp;
2325 logpbn = off;
2326 #endif /* ! _KERNEL */
2327
2328 scratch = wapbl_malloc(MAXBSIZE);
2329
2330 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
2331 if (error)
2332 goto errout;
2333
2334 wch = (struct wapbl_wc_header *)scratch;
2335 wch2 =
2336 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2337 /* XXX verify checksums and magic numbers */
2338 if (wch->wc_type != WAPBL_WC_HEADER) {
2339 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2340 error = EFTYPE;
2341 goto errout;
2342 }
2343
2344 if (wch2->wc_generation > wch->wc_generation)
2345 wch = wch2;
2346
2347 wr = wapbl_calloc(1, sizeof(*wr));
2348
2349 wr->wr_logvp = vp;
2350 wr->wr_devvp = devvp;
2351 wr->wr_logpbn = logpbn;
2352
2353 wr->wr_scratch = scratch;
2354
2355 memcpy(&wr->wr_wc_header, wch, sizeof(wr->wr_wc_header));
2356
2357 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2358
2359 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2360 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2361 " len=%"PRId64" used=%zu\n",
2362 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2363 wch->wc_circ_size, used));
2364
2365 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2366
2367 error = wapbl_replay_process(wr);
2368 if (error) {
2369 wapbl_replay_stop(wr);
2370 wapbl_replay_free(wr);
2371 return error;
2372 }
2373
2374 *wrp = wr;
2375 return 0;
2376
2377 errout:
2378 wapbl_free(scratch);
2379 return error;
2380 }
2381
2382 void
2383 wapbl_replay_stop(struct wapbl_replay *wr)
2384 {
2385
2386 if (!wapbl_replay_isopen(wr))
2387 return;
2388
2389 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2390
2391 wapbl_free(wr->wr_scratch);
2392 wr->wr_scratch = 0;
2393
2394 wr->wr_logvp = 0;
2395
2396 wapbl_blkhash_clear(wr);
2397 wapbl_blkhash_free(wr);
2398 }
2399
2400 void
2401 wapbl_replay_free(struct wapbl_replay *wr)
2402 {
2403
2404 KDASSERT(!wapbl_replay_isopen(wr));
2405
2406 if (wr->wr_inodes)
2407 wapbl_free(wr->wr_inodes);
2408 wapbl_free(wr);
2409 }
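
/*
 * Sketch of the replay lifecycle as a mount-time caller might use it
 * (the vnode names are illustrative, not taken from this file):
 *
 *	struct wapbl_replay *wr;
 *	int error;
 *
 *	error = wapbl_replay_start(&wr, logvp, off, count, blksize);
 *	if (error == 0) {
 *		error = wapbl_replay_write(wr, fsdevvp);
 *		wapbl_replay_stop(wr);
 *		wapbl_replay_free(wr);
 *	}
 */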
2410
2411 #ifdef _KERNEL
2412 int
2413 wapbl_replay_isopen1(struct wapbl_replay *wr)
2414 {
2415
2416 return wapbl_replay_isopen(wr);
2417 }
2418 #endif
2419
2420 static void
2421 wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
2422 {
2423 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2424 struct wapbl_wc_blocklist *wc =
2425 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2426 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2427 int i, j, n;
2428
2429 for (i = 0; i < wc->wc_blkcount; i++) {
2430 /*
2431 * Enter each physical block into the hashtable independently.
2432 */
2433 n = wc->wc_blocks[i].wc_dlen >> wch->wc_fs_dev_bshift;
2434 for (j = 0; j < n; j++) {
2435 wapbl_blkhash_ins(wr, wc->wc_blocks[i].wc_daddr + j,
2436 *offp);
2437 wapbl_circ_advance(wr, fsblklen, offp);
2438 }
2439 }
2440 }
2441
2442 static void
2443 wapbl_replay_process_revocations(struct wapbl_replay *wr)
2444 {
2445 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2446 struct wapbl_wc_blocklist *wc =
2447 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2448 int i, j, n;
2449
2450 for (i = 0; i < wc->wc_blkcount; i++) {
2451 /*
2452 * Remove any blocks found from the hashtable.
2453 */
2454 n = wc->wc_blocks[i].wc_dlen >> wch->wc_fs_dev_bshift;
2455 for (j = 0; j < n; j++)
2456 wapbl_blkhash_rem(wr, wc->wc_blocks[i].wc_daddr + j);
2457 }
2458 }
2459
2460 static void
2461 wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
2462 {
2463 struct wapbl_wc_inodelist *wc =
2464 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2465 /*
2466 	 * Keep track of where we found this so the location won't be
2467 * overwritten.
2468 */
2469 if (wc->wc_clear) {
2470 wr->wr_inodestail = oldoff;
2471 wr->wr_inodescnt = 0;
2472 if (wr->wr_inodes != NULL) {
2473 wapbl_free(wr->wr_inodes);
2474 wr->wr_inodes = NULL;
2475 }
2476 }
2477 wr->wr_inodeshead = newoff;
2478 if (wc->wc_inocnt == 0)
2479 return;
2480
2481 wr->wr_inodes = wapbl_realloc(wr->wr_inodes,
2482 (wr->wr_inodescnt + wc->wc_inocnt) * sizeof(wc->wc_inodes[0]));
2483 memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
2484 wc->wc_inocnt * sizeof(wc->wc_inodes[0]));
2485 wr->wr_inodescnt += wc->wc_inocnt;
2486 }
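
/*
 * wc_clear is set on the first inode record of each transaction (see
 * wapbl_write_inodes()), so a new transaction's inode list replaces,
 * rather than extends, the list accumulated so far.
 */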
2487
2488 static int
2489 wapbl_replay_process(struct wapbl_replay *wr)
2490 {
2491 off_t off;
2492 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2493 int error;
2494
2495 int logblklen = 1<<wch->wc_log_dev_bshift;
2496
2497 wapbl_blkhash_clear(wr);
2498
2499 off = wch->wc_tail;
2500 while (off != wch->wc_head) {
2501 struct wapbl_wc_null *wcn;
2502 off_t saveoff = off;
2503 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2504 if (error)
2505 goto errout;
2506 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2507 switch (wcn->wc_type) {
2508 case WAPBL_WC_BLOCKS:
2509 wapbl_replay_process_blocks(wr, &off);
2510 break;
2511
2512 case WAPBL_WC_REVOCATIONS:
2513 wapbl_replay_process_revocations(wr);
2514 break;
2515
2516 case WAPBL_WC_INODES:
2517 wapbl_replay_process_inodes(wr, saveoff, off);
2518 break;
2519
2520 default:
2521 printf("Unrecognized wapbl type: 0x%08x\n",
2522 wcn->wc_type);
2523 error = EFTYPE;
2524 goto errout;
2525 }
2526 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2527 if (off != saveoff) {
2528 printf("wapbl_replay: corrupted records\n");
2529 error = EFTYPE;
2530 goto errout;
2531 }
2532 }
2533 return 0;
2534
2535 errout:
2536 wapbl_blkhash_clear(wr);
2537 return error;
2538 }
2539
2540 #if 0
2541 int
2542 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2543 {
2544 off_t off;
2545 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2546 int mismatchcnt = 0;
2547 int logblklen = 1<<wch->wc_log_dev_bshift;
2548 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2549 void *scratch1 = wapbl_malloc(MAXBSIZE);
2550 void *scratch2 = wapbl_malloc(MAXBSIZE);
2551 int error = 0;
2552
2553 KDASSERT(wapbl_replay_isopen(wr));
2554
2555 off = wch->wc_tail;
2556 while (off != wch->wc_head) {
2557 struct wapbl_wc_null *wcn;
2558 #ifdef DEBUG
2559 off_t saveoff = off;
2560 #endif
2561 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2562 if (error)
2563 goto out;
2564 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2565 switch (wcn->wc_type) {
2566 case WAPBL_WC_BLOCKS:
2567 {
2568 struct wapbl_wc_blocklist *wc =
2569 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2570 int i;
2571 for (i = 0; i < wc->wc_blkcount; i++) {
2572 int foundcnt = 0;
2573 int dirtycnt = 0;
2574 int j, n;
2575 /*
2576 				 * Check each physical block against the
2577 				 * hashtable independently.
2578 */
2579 n = wc->wc_blocks[i].wc_dlen >>
2580 wch->wc_fs_dev_bshift;
2581 for (j = 0; j < n; j++) {
2582 struct wapbl_blk *wb =
2583 wapbl_blkhash_get(wr,
2584 wc->wc_blocks[i].wc_daddr + j);
2585 if (wb && (wb->wb_off == off)) {
2586 foundcnt++;
2587 error =
2588 wapbl_circ_read(wr,
2589 scratch1, fsblklen,
2590 &off);
2591 if (error)
2592 goto out;
2593 error =
2594 wapbl_read(scratch2,
2595 fsblklen, fsdevvp,
2596 wb->wb_blk);
2597 if (error)
2598 goto out;
2599 if (memcmp(scratch1,
2600 scratch2,
2601 fsblklen)) {
2602 printf(
2603 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2604 wb->wb_blk, (intmax_t)off);
2605 dirtycnt++;
2606 mismatchcnt++;
2607 }
2608 } else {
2609 wapbl_circ_advance(wr,
2610 fsblklen, &off);
2611 }
2612 }
2613 #if 0
2614 /*
2615 * If all of the blocks in an entry
2616 * are clean, then remove all of its
2617 * blocks from the hashtable since they
2618 * never will need replay.
2619 */
2620 if ((foundcnt != 0) &&
2621 (dirtycnt == 0)) {
2622 off = saveoff;
2623 wapbl_circ_advance(wr,
2624 logblklen, &off);
2625 for (j = 0; j < n; j++) {
2626 struct wapbl_blk *wb =
2627 wapbl_blkhash_get(wr,
2628 wc->wc_blocks[i].wc_daddr + j);
2629 if (wb &&
2630 (wb->wb_off == off)) {
2631 wapbl_blkhash_rem(wr, wb->wb_blk);
2632 }
2633 wapbl_circ_advance(wr,
2634 fsblklen, &off);
2635 }
2636 }
2637 #endif
2638 }
2639 }
2640 break;
2641 case WAPBL_WC_REVOCATIONS:
2642 case WAPBL_WC_INODES:
2643 break;
2644 default:
2645 KASSERT(0);
2646 }
2647 #ifdef DEBUG
2648 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2649 KASSERT(off == saveoff);
2650 #endif
2651 }
2652 out:
2653 wapbl_free(scratch1);
2654 wapbl_free(scratch2);
2655 if (!error && mismatchcnt)
2656 error = EFTYPE;
2657 return error;
2658 }
2659 #endif
2660
2661 int
2662 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2663 {
2664 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2665 struct wapbl_blk *wb;
2666 size_t i;
2667 off_t off;
2668 void *scratch;
2669 int error = 0;
2670 int fsblklen = 1 << wch->wc_fs_dev_bshift;
2671
2672 KDASSERT(wapbl_replay_isopen(wr));
2673
2674 scratch = wapbl_malloc(MAXBSIZE);
2675
2676 	for (i = 0; i <= wr->wr_blkhashmask; ++i) {
2677 LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
2678 off = wb->wb_off;
2679 error = wapbl_circ_read(wr, scratch, fsblklen, &off);
2680 if (error)
2681 break;
2682 error = wapbl_write(scratch, fsblklen, fsdevvp,
2683 wb->wb_blk);
2684 if (error)
2685 break;
2686 }
2687 }
2688
2689 wapbl_free(scratch);
2690 return error;
2691 }
2692
2693 int
2694 wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
2695 {
2696 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2697 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2698
2699 KDASSERT(wapbl_replay_isopen(wr));
2700 KASSERT((len % fsblklen) == 0);
2701
2702 while (len != 0) {
2703 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2704 if (wb)
2705 return 1;
2706 		len -= fsblklen;
		blk++;	/* advance to the next fs block, as wapbl_replay_read() does */
2707 	}
2708 return 0;
2709 }
2710
2711 int
2712 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2713 {
2714 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2715 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2716
2717 KDASSERT(wapbl_replay_isopen(wr));
2718
2719 KASSERT((len % fsblklen) == 0);
2720
2721 while (len != 0) {
2722 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2723 if (wb) {
2724 off_t off = wb->wb_off;
2725 int error;
2726 error = wapbl_circ_read(wr, data, fsblklen, &off);
2727 if (error)
2728 return error;
2729 }
2730 data = (uint8_t *)data + fsblklen;
2731 len -= fsblklen;
2732 blk++;
2733 }
2734 return 0;
2735 }
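
/*
 * Caller-side sketch (hypothetical): overlay any journalled contents
 * on a range just read from the file system device:
 *
 *	if (wapbl_replay_can_read(wr, blk, len))
 *		error = wapbl_replay_read(wr, data, blk, len);
 */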
2736