/*	$NetBSD: vfs_wapbl.c,v 1.4 2008/11/10 20:12:13 joerg Exp $	*/

/*-
 * Copyright (c) 2003,2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements file system independent write ahead filesystem logging.
 */

#define WAPBL_INTERNAL

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.4 2008/11/10 20:12:13 joerg Exp $");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/wapbl.h>

#if WAPBL_UVM_ALLOC
#include <uvm/uvm.h>
#endif

#include <miscfs/specfs/specdev.h>

MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
#define	wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
#define	wapbl_free(a) free((a), M_WAPBL)
#define	wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO)

#else /* !_KERNEL */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <sys/time.h>
#include <sys/wapbl.h>

#define	KDASSERT(x) assert(x)
#define	KASSERT(x) assert(x)
#define	wapbl_malloc(s) malloc(s)
#define	wapbl_free(a) free(a)
#define	wapbl_calloc(n, s) calloc((n), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
struct wapbl {
	struct vnode *wl_logvp;	/* r:	log here */
	struct vnode *wl_devvp;	/* r:	log on this device */
	struct mount *wl_mount;	/* r:	mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r:	Physical block number of start of log */
	int wl_log_dev_bshift;	/* r:	logarithm of device block size of log
					device */
	int wl_fs_dev_bshift;	/* r:	logarithm of device block size of
					filesystem device */

	unsigned wl_lock_count;	/* m:	Count of transactions in progress */

	size_t wl_circ_size;	/* r:	Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r:	Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r:	Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r:	Number of buf bytes reserved for log */

	off_t wl_head;		/* l:	Byte offset of log head */
	off_t wl_tail;		/* l:	Byte offset of log tail */
	/*
	 * head == tail == 0 means log is empty
	 * head == tail != 0 means log is full
	 * see assertions in wapbl_advance() for other boundary conditions.
	 * only truncate moves the tail, except when flush sets it to
	 * wl_header_size; only flush moves the head, except when truncate
	 * sets it to 0.
	 */
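	/*
	 * Illustrative example (assumed values, not from the source):
	 * with wl_circ_off = 1024, an empty log has head == tail == 0.
	 * A first flush of 2048 bytes advances to head = 3072 and
	 * tail = 1024; once everything is reclaimed, the tail catches
	 * the head and both reset to 0.
	 */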

	struct wapbl_wc_header *wl_wc_header;	/* l */
	void *wl_wc_scratch;	/* l:	scratch space (XXX: why?!?) */

	kmutex_t wl_mtx;	/* u:	short-term lock */
	krwlock_t wl_rwlock;	/* u:	File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
	wapbl_flush_fn_t wl_flush;	/* r */
	wapbl_flush_fn_t wl_flush_abort;/* r */

	size_t wl_bufbytes;	/* m:	Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m:	Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m:	Total bcount of wl_bufs */

	LIST_HEAD(, buf) wl_bufs; /* m:	Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes;	/* m:	Amount of space available for
					   reclamation by truncate */
	int wl_error_count;	/* m:	# of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes;	/* Byte count of unsynced buffers */
#endif

	daddr_t *wl_deallocblks;/* l:	address of block */
	int *wl_dealloclens;	/* l:	size of block (fragments, remember) */
	int wl_dealloccnt;	/* l:	total count */
	int wl_dealloclim;	/* l:	max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
						   accounting */
};

#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
#endif /* _KERNEL */

static int wapbl_replay_prescan(struct wapbl_replay *wr);
static int wapbl_replay_get_inodes(struct wapbl_replay *wr);

static __inline size_t wapbl_space_free(size_t avail, off_t head,
	off_t tail);
static __inline size_t wapbl_space_used(size_t avail, off_t head,
	off_t tail);

#ifdef _KERNEL

#define	WAPBL_INODETRK_SIZE 83
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

#ifdef DEBUG
int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
#endif

static int wapbl_replay_isopen1(struct wapbl_replay *);

/*
 * This is useful for debugging.  If set, the log will
 * only be truncated when necessary.
 */
int wapbl_lazy_truncate = 0;

struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};

void
wapbl_init(void)
{

	malloc_type_attach(M_WAPBL);
}

int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = DEV_BSHIFT;
	int fs_dev_bshift = DEV_BSHIFT;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
			("wapbl: log device's block size cannot be larger "
			 "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return ENOSPC;
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	LIST_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of the log device block size */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;

	/*
	 * wl_bufbytes_max limits the size of the in memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
	 *   it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
	 * Therefore it must be multiple of the least common multiple of those
	 * three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
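
	/*
	 * Worked example (illustrative, assuming the typical values
	 * PAGE_SHIFT == 12 and log/fs dev bshift == 9): each >>/<< pair
	 * clears the corresponding low bits, so a starting value of
	 * 1234567 becomes 1232896 (301 * 4096) after the PAGE_SHIFT
	 * pair, and is left unchanged by the 9-bit pairs since any
	 * multiple of 4096 is already a multiple of 512.
	 */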

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (nbuf / 2) * 1024;

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);

#if WAPBL_UVM_ALLOC
	wl->wl_deallocblks = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	KASSERT(wl->wl_deallocblks != NULL);
	wl->wl_dealloclens = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
	KASSERT(wl->wl_dealloclens != NULL);
#else
	wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
	    wl->wl_dealloclim);
	wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
	    wl->wl_dealloclim);
#endif

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1<<wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_malloc(len);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		int i;

		WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
		    ("wapbl_start: reusing log with %d inodes\n",
		    wr->wr_inodescnt));

		/*
		 * It's only valid to reuse the replay log if it's
		 * the same as the new log we just opened.
		 */
		KDASSERT(!wapbl_replay_isopen(wr));
		KASSERT(devvp->v_rdev == wr->wr_devvp->v_rdev);
		KASSERT(logpbn == wr->wr_logpbn);
		KASSERT(wl->wl_circ_size == wr->wr_wc_header.wc_circ_size);
		KASSERT(wl->wl_circ_off == wr->wr_wc_header.wc_circ_off);
		KASSERT(wl->wl_log_dev_bshift ==
		    wr->wr_wc_header.wc_log_dev_bshift);
		KASSERT(wl->wl_fs_dev_bshift ==
		    wr->wr_wc_header.wc_fs_dev_bshift);

		wl->wl_wc_header->wc_generation =
		    wr->wr_wc_header.wc_generation + 1;

		for (i = 0; i < wr->wr_inodescnt; i++)
			wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
			    wr->wr_inodes[i].wr_imode);

		/* Make sure new transaction won't overwrite old inodes list */
		KDASSERT(wapbl_transaction_len(wl) <=
		    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
		    wr->wr_inodestail));

		wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
		wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
		    wapbl_transaction_len(wl);

		error = wapbl_write_inodes(wl, &wl->wl_head);
		if (error)
			goto errout;

		KASSERT(wl->wl_head != wl->wl_tail);
		KASSERT(wl->wl_head != 0);
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
 errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);
	wapbl_free(wl);

	return error;
}

/*
 * Like wapbl_flush, only discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

#ifdef WAPBL_DEBUG_PRINT
	{
		struct wapbl_entry *we;
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}
	}

	/* Discard list of deallocs */
	wl->wl_dealloccnt = 0;
	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	struct vnode *vp;
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return EBUSY;
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	vp = wl->wl_logvp;

	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl);

	return 0;
}

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
	struct buf *bp;
	int error;

	KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
	KASSERT(devvp->v_type == VBLK);

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(&devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(&devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY;	/* silly & dubious */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%x failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}

/*
 * off is the current byte offset into the circular log; the new offset
 * for the next write is returned through offp.  Handles log wraparound.
 */
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;

	KDASSERT(((len >> wl->wl_log_dev_bshift) <<
	    wl->wl_log_dev_bshift) == len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		error = wapbl_write(data, slen, wl->wl_devvp,
		    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	error = wapbl_write(data, len, wl->wl_devvp,
	    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}
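
/*
 * Example (illustrative numbers only): with wl_circ_off = 1024 and
 * wl_circ_size = 8192, the usable region is [1024, 9216).  A 2048-byte
 * write starting at *offp = 8704 is split into 512 bytes written at
 * offset 8704 and 1536 bytes continuing at 1024, leaving *offp = 2560.
 */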

/****************************************************************/

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;
	krw_t op;

	KDASSERT(wl);

	/*
	 * XXX: The original code calls for the use of a RW_READER lock
	 * here, but it turns out there are performance issues with high
	 * metadata-rate workloads (e.g. multiple simultaneous tar
	 * extractions).  For now, we force the lock to be RW_WRITER,
	 * since that currently has the best performance characteristics
	 * (even for a single tar-file extraction).
	 *
	 */
#define WAPBL_DEBUG_SERIALIZE 1

#ifdef WAPBL_DEBUG_SERIALIZE
	op = RW_WRITER;
#else
	op = RW_READER;
#endif

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
		   wl->wl_bufbytes_max / 2) ||
		  ((wl->wl_bufcount + (lockcount * 10)) >
		   wl->wl_bufcount_max / 2) ||
		  (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
	mutex_exit(&wl->wl_mtx);

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, op);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}

void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then?  leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		LIST_REMOVE(bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked but dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	    ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	LIST_REMOVE(bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */

/****************************************************************/
/* Some utility inlines */

/* Advance the pointer 'old' by 'delta' to its new value within the circular log */
static __inline off_t
wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
{
	off_t new;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= size);
	KASSERT((old == 0) || (old >= off));
	KASSERT(old < (size + off));

	if ((old == 0) && (delta != 0))
		new = off + delta;
	else if ((old + delta) < (size + off))
		new = old + delta;
	else
		new = (old + delta) - size;

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (new == old));
	KASSERT((delta == 0) || (new != 0));
	KASSERT((delta != (size)) || (new == old));

	/* Define acceptable ranges for output. */
	KASSERT((new == 0) || (new >= off));
	KASSERT(new < (size + off));
	return new;
}
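
/*
 * Worked example (illustrative values): with size = 8192 and off = 1024,
 * wapbl_advance(8192, 1024, 8704, 1024) wraps around to
 * (8704 + 1024) - 8192 = 1536, while advancing the empty pointer,
 * wapbl_advance(8192, 1024, 0, 512), yields 1024 + 512 = 1536.
 */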

static __inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail == 0) {
		KASSERT(head == 0);
		return 0;
	}
	return ((head + (avail - 1) - tail) % avail) + 1;
}
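
/*
 * Example (illustrative): with avail = 8192, head = 1536 and tail =
 * 8704, the used space is ((1536 + 8191 - 8704) % 8192) + 1 = 1024
 * bytes; a full log (head == tail != 0) reports all avail bytes used.
 */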

static __inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{

	return avail - wapbl_space_used(avail, head, tail);
}

static __inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_free(size, head, tail));
	head = wapbl_advance(size, off, head, delta);
	if ((tail == 0) && (head != 0))
		tail = off;
	*headp = head;
	*tailp = tail;
}

static __inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_used(size, head, tail));
	tail = wapbl_advance(size, off, tail, delta);
	if (head == tail) {
		head = tail = 0;
	}
	*headp = head;
	*tailp = tail;
}

#ifdef _KERNEL

/****************************************************************/

/*
 * Remove transactions whose buffers are completely flushed to disk.
 * Will block until at least minfree space is available.
 * only intended to be called from inside wapbl_flush and therefore
 * does not protect against commit races with itself or with flush.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
		wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	if (waitonly)
		return 0;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush.  This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}

/****************************************************************/

void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;

	/*
	 * Handle possible flushing of buffers after log has been
	 * decommissioned.
	 */
	if (!wl) {
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
		we->we_unsynced_bufbytes -= bp->b_bufsize;
#endif

		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_flags & B_DONE);
	KDASSERT(!(bp->b_flags & B_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_flags & B_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_flags & B_INVAL));
	KDASSERT(!(bp->b_flags & B_NOCACHE));
#endif

	if (bp->b_error) {
#ifdef notyet /* Can't currently handle possible dirty buffer reuse */
		XXXpooka: interfaces not fully updated
		Note: this was not enabled in the original patch
		against netbsd4 either.  I don't know if comment
		above is true or not.

		/*
		 * If an error occurs, report the error and leave the
		 * buffer as a delayed write on the LRU queue.
		 * restarting the write would likely result in
		 * an error spinloop, so let it be done harmlessly
		 * by the syncer.
		 */
		bp->b_flags &= ~(B_DONE);
		simple_unlock(&bp->b_interlock);

		if (we->we_error == 0) {
			mutex_enter(&wl->wl_mtx);
			wl->wl_error_count++;
			mutex_exit(&wl->wl_mtx);
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		we->we_error = bp->b_error;
		bp->b_error = 0;
		brelse(bp);
		return;
#else
		/* For now, just mark the log permanently errored out */

		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
#endif
	}

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
	we->we_unsynced_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
	wl->wl_unsynced_bufbytes -= bp->b_bufsize;
#endif

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions.  If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has been successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		    (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			wapbl_free(we);
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
	brelse(bp, 0);
}

/*
 * Write transactions to disk + start I/O for contents
 */
int
wapbl_flush(struct wapbl *wl, int waitfor)
{
	struct buf *bp;
	struct wapbl_entry *we;
	off_t off;
	off_t head;
	off_t tail;
	size_t delta = 0;
	size_t flushsize;
	size_t reserved;
	int error = 0;

	/*
	 * Do a quick check to see if a full flush can be skipped
	 * This assumes that the flush callback does not need to be called
	 * unless there are other outstanding bufs.
	 */
	if (!waitfor) {
		size_t nbufs;
		mutex_enter(&wl->wl_mtx);	/* XXX need mutex here to
						   protect the KASSERTS */
		nbufs = wl->wl_bufcount;
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
		mutex_exit(&wl->wl_mtx);
		if (nbufs == 0)
			return 0;
	}

	/*
	 * XXX we may consider using LK_UPGRADE here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

	/*
	 * Now that we are fully locked and flushed,
	 * do another check for nothing to do.
	 */
	if (wl->wl_bufcount == 0) {
		goto out;
	}

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d flushing entries with "
	    "bufcount=%zu bufbytes=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes));
#endif

	/* Calculate amount of space needed to flush */
	flushsize = wapbl_transaction_len(wl);

	if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
		/*
		 * XXX this could be handled more gracefully, perhaps place
		 * only a partial transaction in the log and allow the
		 * remaining to flush without the protection of the journal.
		 */
		panic("wapbl_flush: current transaction too big to flush\n");
	}

	error = wapbl_truncate(wl, flushsize, 0);
	if (error)
		goto out2;

	off = wl->wl_head;
	KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
	    (off < wl->wl_circ_off + wl->wl_circ_size)));
	error = wapbl_write_blocks(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_revocations(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_inodes(wl, &off);
	if (error)
		goto out2;

	reserved = 0;
	if (wl->wl_inohashcnt)
		reserved = wapbl_transaction_inodes_len(wl);

	head = wl->wl_head;
	tail = wl->wl_tail;

	wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
	    &head, &tail);
#ifdef WAPBL_DEBUG
	if (head != off) {
		panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
		      " off=%"PRIdMAX" flush=%zu\n",
		      (intmax_t)head, (intmax_t)tail, (intmax_t)off,
		      flushsize);
	}
#else
	KASSERT(head == off);
#endif

	/* Opportunistically move the tail forward if we can */
	if (!wapbl_lazy_truncate) {
		mutex_enter(&wl->wl_mtx);
		delta = wl->wl_reclaimable_bytes;
		mutex_exit(&wl->wl_mtx);
		wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
		    &head, &tail);
	}

	error = wapbl_write_commit(wl, head, tail);
	if (error)
		goto out2;

	/* poolme?  or kmemme? */
	we = wapbl_calloc(1, sizeof(*we));

#ifdef WAPBL_DEBUG_BUFBYTES
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
		 " unsynced=%zu"
		 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
		 "inodes=%d\n",
		 curproc->p_pid, curlwp->l_lid, flushsize, delta,
		 wapbl_space_used(wl->wl_circ_size, head, tail),
		 wl->wl_unsynced_bufbytes, wl->wl_bufcount,
		 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
		 wl->wl_inohashcnt));
#else
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
		 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
		 "inodes=%d\n",
		 curproc->p_pid, curlwp->l_lid, flushsize, delta,
		 wapbl_space_used(wl->wl_circ_size, head, tail),
		 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
		 wl->wl_dealloccnt, wl->wl_inohashcnt));
#endif


	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);

	wl->wl_reserved_bytes = reserved;
	wl->wl_head = head;
	wl->wl_tail = tail;
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	wl->wl_dealloccnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
	wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
#endif

	we->we_wapbl = wl;
	we->we_bufcount = wl->wl_bufcount;
#ifdef WAPBL_DEBUG_BUFBYTES
	we->we_unsynced_bufbytes = wl->wl_bufbytes;
#endif
	we->we_reclaimable_bytes = flushsize;
	we->we_error = 0;
	SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);

	/*
	 * this flushes bufs in the reverse order from which they were queued
	 * it shouldn't matter, but if we care we could use TAILQ instead.
	 * XXX Note they will get put on the lru queue when they flush
	 * so we might actually want to change this to preserve order.
	 */
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
			continue;
		}
		bp->b_iodone = wapbl_biodone;
		bp->b_private = we;
		bremfree(bp);
		wapbl_remove_buf_locked(wl, bp);
		mutex_exit(&wl->wl_mtx);
		mutex_exit(&bufcache_lock);
		bawrite(bp);
		mutex_enter(&bufcache_lock);
		mutex_enter(&wl->wl_mtx);
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d done flushing entries...\n",
	    curproc->p_pid, curlwp->l_lid));
#endif

 out:

	/*
	 * If the waitfor flag is set, don't return until everything is
	 * fully flushed and the on disk log is empty.
	 */
	if (waitfor) {
		error = wapbl_truncate(wl, wl->wl_circ_size -
		    wl->wl_reserved_bytes, wapbl_lazy_truncate);
	}

 out2:
	if (error) {
		wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
		    wl->wl_dealloclens, wl->wl_dealloccnt);
	}

#ifdef WAPBL_DEBUG_PRINT
	if (error) {
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
		mutex_enter(&wl->wl_mtx);
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n", we->we_bufcount,
			    we->we_reclaimable_bytes, we->we_error));
		}
#endif
		mutex_exit(&wl->wl_mtx);
	}
#endif

	rw_exit(&wl->wl_rwlock);
	return error;
}

/****************************************************************/

void
wapbl_jlock_assert(struct wapbl *wl)
{

#ifdef WAPBL_DEBUG_SERIALIZE
	KASSERT(rw_write_held(&wl->wl_rwlock));
#else
	KASSERT(rw_read_held(&wl->wl_rwlock) || rw_write_held(&wl->wl_rwlock));
#endif
}

void
wapbl_junlock_assert(struct wapbl *wl)
{

#ifdef WAPBL_DEBUG_SERIALIZE
	KASSERT(!rw_write_held(&wl->wl_rwlock));
#endif
}

/****************************************************************/

/* locks missing */
void
wapbl_print(struct wapbl *wl,
	int full,
	void (*pr)(const char *, ...))
{
	struct buf *bp;
	struct wapbl_entry *we;
	(*pr)("wapbl %p", wl);
	(*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
	    wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
	(*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
	    wl->wl_circ_size, wl->wl_circ_off,
	    (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
	(*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
	    wl->wl_fs_dev_bshift, wl->wl_log_dev_bshift);
#ifdef WAPBL_DEBUG_BUFBYTES
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	      "reserved = %zu errcnt = %d unsynced = %zu\n",
	      wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
	      wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	      wl->wl_error_count, wl->wl_unsynced_bufbytes);
#else
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	      "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
	      wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	      wl->wl_error_count);
#endif
	(*pr)("\tdealloccnt = %d, dealloclim = %d\n",
	    wl->wl_dealloccnt, wl->wl_dealloclim);
	(*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
	    wl->wl_inohashcnt, wl->wl_inohashmask);
	(*pr)("entries:\n");
	SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
#ifdef WAPBL_DEBUG_BUFBYTES
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
		    "unsynced = %zu\n",
		    we->we_bufcount, we->we_reclaimable_bytes,
		    we->we_error, we->we_unsynced_bufbytes);
#else
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
		    we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
#endif
	}
	if (full) {
		int cnt = 0;
		(*pr)("bufs =");
		LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
			if (!LIST_NEXT(bp, b_wapbllist)) {
				(*pr)(" %p", bp);
			} else if ((++cnt % 6) == 0) {
				(*pr)(" %p,\n\t", bp);
			} else {
				(*pr)(" %p,", bp);
			}
		}
		(*pr)("\n");

		(*pr)("dealloced blks = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i < wl->wl_dealloccnt; i++) {
				(*pr)(" %"PRId64":%d,",
				    wl->wl_deallocblks[i],
				    wl->wl_dealloclens[i]);
				if ((++cnt % 4) == 0) {
					(*pr)("\n\t");
				}
			}
		}
		(*pr)("\n");

		(*pr)("registered inodes = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i <= wl->wl_inohashmask; i++) {
				struct wapbl_ino_head *wih;
				struct wapbl_ino *wi;

				wih = &wl->wl_inohash[i];
				LIST_FOREACH(wi, wih, wi_hash) {
					if (wi->wi_ino == 0)
						continue;
					(*pr)(" %"PRId32"/0%06"PRIo32",",
					    wi->wi_ino, wi->wi_mode);
					if ((++cnt % 4) == 0) {
						(*pr)("\n\t");
					}
				}
			}
			(*pr)("\n");
		}
	}
}

#if defined(WAPBL_DEBUG) || defined(DDB)
void
wapbl_dump(struct wapbl *wl)
{
#if defined(WAPBL_DEBUG)
	if (!wl)
		wl = wapbl_debug_wl;
#endif
	if (!wl)
		return;
	wapbl_print(wl, 1, printf);
}
#endif

/****************************************************************/

void
wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
{

	wapbl_jlock_assert(wl);

	/* XXX should eventually instead tie this into resource estimation */
	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
	wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
	wl->wl_dealloclens[wl->wl_dealloccnt] = len;
	wl->wl_dealloccnt++;
	WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
	    ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
}

/****************************************************************/

static void
wapbl_inodetrk_init(struct wapbl *wl, u_int size)
{

	wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
	if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
		pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
		    "wapblinopl", &pool_allocator_nointr, IPL_NONE);
	}
}

static void
wapbl_inodetrk_free(struct wapbl *wl)
{

	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_inohashcnt == 0);
	hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
	if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
		pool_destroy(&wapbl_ino_pool);
	}
}

static struct wapbl_ino *
wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	KASSERT(mutex_owned(&wl->wl_mtx));

	wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
	LIST_FOREACH(wi, wih, wi_hash) {
		if (ino == wi->wi_ino)
			return wi;
	}
	return 0;
}

void
wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	wi = pool_get(&wapbl_ino_pool, PR_WAITOK);

	mutex_enter(&wl->wl_mtx);
	if (wapbl_inodetrk_get(wl, ino) == NULL) {
		wi->wi_ino = ino;
		wi->wi_mode = mode;
		wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
		LIST_INSERT_HEAD(wih, wi, wi_hash);
		wl->wl_inohashcnt++;
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_register_inode: ino=%"PRId64"\n", ino));
		mutex_exit(&wl->wl_mtx);
	} else {
		mutex_exit(&wl->wl_mtx);
		pool_put(&wapbl_ino_pool, wi);
	}
}

void
wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino *wi;

	mutex_enter(&wl->wl_mtx);
	wi = wapbl_inodetrk_get(wl, ino);
	if (wi) {
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
		KASSERT(wl->wl_inohashcnt > 0);
		wl->wl_inohashcnt--;
		LIST_REMOVE(wi, wi_hash);
		mutex_exit(&wl->wl_mtx);

		pool_put(&wapbl_ino_pool, wi);
	} else {
		mutex_exit(&wl->wl_mtx);
	}
}

/****************************************************************/

static __inline size_t
wapbl_transaction_inodes_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int iph;

	/* Calculate number of inodes described in an inodelist header */
	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);

	KASSERT(iph > 0);

	return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
}
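
/*
 * Example (illustrative; the exact iph depends on the
 * wapbl_wc_inodelist layout in <sys/wapbl.h>): if a 512-byte log block
 * holds, say, 60 inode entries per header, 150 registered inodes need
 * howmany(150, 60) = 3 blocks, i.e. 1536 bytes; with no registered
 * inodes a single block is still counted.
 */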


/* Calculate amount of space a transaction will take on disk */
static size_t
wapbl_transaction_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	size_t len;
	int bph;

	/* Calculate number of blocks described in a blocklist header */
	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	KASSERT(bph > 0);

	len = wl->wl_bcount;
	len += howmany(wl->wl_bufcount, bph)*blocklen;
	len += howmany(wl->wl_dealloccnt, bph)*blocklen;
	len += wapbl_transaction_inodes_len(wl);

	return len;
}
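
/*
 * Worked example (illustrative values): with blocklen = 512 and
 * bph = 60, a transaction of 10 buffers totalling bcount = 40960 bytes
 * plus 5 deallocations takes 40960 + howmany(10, 60)*512 +
 * howmany(5, 60)*512 = 41984 bytes, plus the
 * wapbl_transaction_inodes_len() contribution.
 */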

/*
 * Perform commit operation
 *
 * Note that generation number incrementation needs to
 * be protected against racing with other invocations
 * of wapbl_commit.  This is ok since this routine
 * is only invoked from wapbl_flush
 */
static int
wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
{
	struct wapbl_wc_header *wc = wl->wl_wc_header;
	struct timespec ts;
	int error;
	int force = 1;

	/* XXX Calc checksum here, instead we do this for now */
	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	wc->wc_head = head;
	wc->wc_tail = tail;
	wc->wc_checksum = 0;
	wc->wc_version = 1;
	getnanotime(&ts);
	wc->wc_time = ts.tv_sec;
	wc->wc_timensec = ts.tv_nsec;

	WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1873 ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n",
	    (intmax_t)head, (intmax_t)tail));

	/*
	 * XXX if generation will rollover, then first zero
	 * over second commit header before trying to write both headers.
	 */
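	/*
	 * The two reserved commit header blocks alternate by generation
	 * parity: an even wc_generation is written at wl_logpbn, an odd
	 * one at wl_logpbn + 1, so replay can choose the newer of the two.
	 */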
1880
1881 error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
1882 wl->wl_logpbn + wc->wc_generation % 2);
1883 if (error)
1884 return error;
1885
1886 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
1887 if (error) {
1888 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1889 ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
1890 "returned %d\n", wl->wl_devvp->v_rdev, error));
1891 }
1892
1893 /*
1894 * If the generation number was zero, write it out a second time.
1895 * This handles initialization and generation number rollover
1896 */
1897 if (wc->wc_generation++ == 0) {
1898 error = wapbl_write_commit(wl, head, tail);
1899 /*
1900 * This panic should be able to be removed if we do the
1901 * zero'ing mentioned above, and we are certain to roll
1902 * back generation number on failure.
1903 */
1904 if (error)
1905 panic("wapbl_write_commit: error writing duplicate "
1906 "log header: %d\n", error);
1907 }
1908 return 0;
1909 }
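/*
 * Sketch of the commit-header scheme used above: two header slots live
 * in adjacent log-device blocks, and each commit is written to the slot
 * selected by (wc_generation % 2).  Replay reads both slots and trusts
 * whichever holds the larger generation number, so a torn write of one
 * slot can never clobber the previous good commit record:
 *
 *	slot = wl->wl_logpbn + wc->wc_generation % 2;
 *	(write the header to slot, sync the cache, then wc_generation++)
 */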
1910
1911 /* Write the transaction's buffers to the log; the new head offset is returned via *offp */
1912 static int
1913 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
1914 {
1915 struct wapbl_wc_blocklist *wc =
1916 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1917 int blocklen = 1<<wl->wl_log_dev_bshift;
1918 int bph;
1919 struct buf *bp;
1920 off_t off = *offp;
1921 int error;
1922
1923 KASSERT(rw_write_held(&wl->wl_rwlock));
1924
1925 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1926 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1927
1928 bp = LIST_FIRST(&wl->wl_bufs);
1929
1930 while (bp) {
1931 int cnt;
1932 struct buf *obp = bp;
1933
1934 KASSERT(bp->b_flags & B_LOCKED);
1935
1936 wc->wc_type = WAPBL_WC_BLOCKS;
1937 wc->wc_len = blocklen;
1938 wc->wc_blkcount = 0;
1939 while (bp && (wc->wc_blkcount < bph)) {
1940 /*
1941 * Make sure all the physical block numbers are up to
1942 * date. If this is not always true on a given
1943 * filesystem, then VOP_BMAP must be called. We
1944 * could call VOP_BMAP here, or else in the filesystem
1945 * specific flush callback, although neither of those
1946 * solutions allow us to take the vnode lock. If a
1947 * filesystem requires that we must take the vnode lock
1948 * to call VOP_BMAP, then we can probably do it in
1949 * bwrite when the vnode lock should already be held
1950 * by the invoking code.
1951 */
1952 KASSERT((bp->b_vp->v_type == VBLK) ||
1953 (bp->b_blkno != bp->b_lblkno));
1954 KASSERT(bp->b_blkno > 0);
1955
1956 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
1957 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
1958 wc->wc_len += bp->b_bcount;
1959 wc->wc_blkcount++;
1960 bp = LIST_NEXT(bp, b_wapbllist);
1961 }
1962 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1963 ("wapbl_write_blocks: len = %u off = %"PRIdMAX"\n",
1964 wc->wc_len, (intmax_t)off));
1965
1966 error = wapbl_circ_write(wl, wc, blocklen, &off);
1967 if (error)
1968 return error;
1969 bp = obp;
1970 cnt = 0;
1971 while (bp && (cnt++ < bph)) {
1972 error = wapbl_circ_write(wl, bp->b_data,
1973 bp->b_bcount, &off);
1974 if (error)
1975 return error;
1976 bp = LIST_NEXT(bp, b_wapbllist);
1977 }
1978 }
1979 *offp = off;
1980 return 0;
1981 }
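/*
 * On-disk shape of the records emitted above (a sketch): each
 * WAPBL_WC_BLOCKS header block is followed immediately by the data of
 * the up-to-bph buffers it describes, so two one-block buffers produce
 *
 *	[ blocklist header | buf0 data | buf1 data ]
 *
 * with wc_len counting the header block plus all of the data bytes.
 */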
1982
1983 static int
1984 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
1985 {
1986 struct wapbl_wc_blocklist *wc =
1987 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1988 int i;
1989 int blocklen = 1<<wl->wl_log_dev_bshift;
1990 int bph;
1991 off_t off = *offp;
1992 int error;
1993
1994 if (wl->wl_dealloccnt == 0)
1995 return 0;
1996
1997 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1998 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1999
2000 i = 0;
2001 while (i < wl->wl_dealloccnt) {
2002 wc->wc_type = WAPBL_WC_REVOCATIONS;
2003 wc->wc_len = blocklen;
2004 wc->wc_blkcount = 0;
2005 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
2006 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2007 wl->wl_deallocblks[i];
2008 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2009 wl->wl_dealloclens[i];
2010 wc->wc_blkcount++;
2011 i++;
2012 }
2013 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2014 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2015 wc->wc_len, (intmax_t)off));
2016 error = wapbl_circ_write(wl, wc, blocklen, &off);
2017 if (error)
2018 return error;
2019 }
2020 *offp = off;
2021 return 0;
2022 }
2023
2024 static int
2025 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2026 {
2027 struct wapbl_wc_inodelist *wc =
2028 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2029 int i;
2030 int blocklen = 1<<wl->wl_log_dev_bshift;
2031 off_t off = *offp;
2032 int error;
2033
2034 struct wapbl_ino_head *wih;
2035 struct wapbl_ino *wi;
2036 int iph;
2037
2038 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2039 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2040
2041 i = 0;
2042 wih = &wl->wl_inohash[0];
2043 wi = 0;
2044 do {
2045 wc->wc_type = WAPBL_WC_INODES;
2046 wc->wc_len = blocklen;
2047 wc->wc_inocnt = 0;
2048 wc->wc_clear = (i == 0);
2049 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2050 while (!wi) {
2051 KASSERT((wih - &wl->wl_inohash[0])
2052 <= wl->wl_inohashmask);
2053 wi = LIST_FIRST(wih++);
2054 }
2055 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2056 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2057 wc->wc_inocnt++;
2058 i++;
2059 wi = LIST_NEXT(wi, wi_hash);
2060 }
2061 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2062 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2063 wc->wc_len, (intmax_t)off));
2064 error = wapbl_circ_write(wl, wc, blocklen, &off);
2065 if (error)
2066 return error;
2067 } while (i < wl->wl_inohashcnt);
2068
2069 *offp = off;
2070 return 0;
2071 }
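/*
 * Note on wc_clear (as written by the loop above): the first inodelist
 * record of a transaction is emitted with wc_clear set, which tells
 * replay to discard any inode list accumulated from earlier records;
 * the remaining records of the same transaction then append to it.
 */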
2072
2073 #endif /* _KERNEL */
2074
2075 /****************************************************************/
2076
2077 #ifdef _KERNEL
2078 static struct pool wapbl_blk_pool;
2079 static int wapbl_blk_pool_refcount;
2080 #endif
2081 struct wapbl_blk {
2082 LIST_ENTRY(wapbl_blk) wb_hash;
2083 daddr_t wb_blk;
2084 off_t wb_off; /* Offset of this block in the log */
2085 };
2086 #define WAPBL_BLKPOOL_MIN 83
2087
2088 static void
2089 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2090 {
2091 if (size < WAPBL_BLKPOOL_MIN)
2092 size = WAPBL_BLKPOOL_MIN;
2093 KASSERT(wr->wr_blkhash == 0);
2094 #ifdef _KERNEL
2095 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2096 if (atomic_inc_uint_nv(&wapbl_blk_pool_refcount) == 1) {
2097 pool_init(&wapbl_blk_pool, sizeof(struct wapbl_blk), 0, 0, 0,
2098 "wapblblkpl", &pool_allocator_nointr, IPL_NONE);
2099 }
2100 #else /* ! _KERNEL */
2101 /* Manually implement hashinit */
2102 {
2103 int i;
2104 unsigned long hashsize;
2105 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2106 continue;
2107 wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
2108 		for (i = 0; i < hashsize; i++)
2109 LIST_INIT(&wr->wr_blkhash[i]);
2110 wr->wr_blkhashmask = hashsize - 1;
2111 }
2112 #endif /* ! _KERNEL */
2113 }
2114
2115 static void
2116 wapbl_blkhash_free(struct wapbl_replay *wr)
2117 {
2118 KASSERT(wr->wr_blkhashcnt == 0);
2119 #ifdef _KERNEL
2120 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2121 if (atomic_dec_uint_nv(&wapbl_blk_pool_refcount) == 0) {
2122 pool_destroy(&wapbl_blk_pool);
2123 }
2124 #else /* ! _KERNEL */
2125 wapbl_free(wr->wr_blkhash);
2126 #endif /* ! _KERNEL */
2127 }
2128
2129 static struct wapbl_blk *
2130 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2131 {
2132 struct wapbl_blk_head *wbh;
2133 struct wapbl_blk *wb;
2134 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2135 LIST_FOREACH(wb, wbh, wb_hash) {
2136 if (blk == wb->wb_blk)
2137 return wb;
2138 }
2139 return 0;
2140 }
2141
2142 static void
2143 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2144 {
2145 struct wapbl_blk_head *wbh;
2146 struct wapbl_blk *wb;
2147 wb = wapbl_blkhash_get(wr, blk);
2148 if (wb) {
2149 KASSERT(wb->wb_blk == blk);
2150 wb->wb_off = off;
2151 } else {
2152 #ifdef _KERNEL
2153 wb = pool_get(&wapbl_blk_pool, PR_WAITOK);
2154 #else /* ! _KERNEL */
2155 wb = wapbl_malloc(sizeof(*wb));
2156 #endif /* ! _KERNEL */
2157 wb->wb_blk = blk;
2158 wb->wb_off = off;
2159 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2160 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2161 wr->wr_blkhashcnt++;
2162 }
2163 }
2164
2165 static void
2166 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2167 {
2168 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2169 if (wb) {
2170 KASSERT(wr->wr_blkhashcnt > 0);
2171 wr->wr_blkhashcnt--;
2172 LIST_REMOVE(wb, wb_hash);
2173 #ifdef _KERNEL
2174 pool_put(&wapbl_blk_pool, wb);
2175 #else /* ! _KERNEL */
2176 wapbl_free(wb);
2177 #endif /* ! _KERNEL */
2178 }
2179 }
2180
2181 static void
2182 wapbl_blkhash_clear(struct wapbl_replay *wr)
2183 {
2184 int i;
2185 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2186 struct wapbl_blk *wb;
2187
2188 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2189 KASSERT(wr->wr_blkhashcnt > 0);
2190 wr->wr_blkhashcnt--;
2191 LIST_REMOVE(wb, wb_hash);
2192 #ifdef _KERNEL
2193 pool_put(&wapbl_blk_pool, wb);
2194 #else /* ! _KERNEL */
2195 wapbl_free(wb);
2196 #endif /* ! _KERNEL */
2197 }
2198 }
2199 KASSERT(wr->wr_blkhashcnt == 0);
2200 }
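/*
 * Usage sketch for the replay block hash above (lifecycle only):
 *
 *	wapbl_blkhash_init(wr, nblocks);    size hint, rounded up
 *	wapbl_blkhash_ins(wr, blk, off);    newest log copy wins
 *	wb = wapbl_blkhash_get(wr, blk);    NULL when not journaled
 *	wapbl_blkhash_rem(wr, blk);         e.g. on a revocation record
 *	wapbl_blkhash_clear(wr);            drop all entries
 *	wapbl_blkhash_free(wr);             requires wr_blkhashcnt == 0
 *
 * Re-inserting an existing block just updates wb_off, so the table
 * always points at the most recent copy of each block in the log.
 */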
2201
2202 /****************************************************************/
2203
2204 static int
2205 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2206 {
2207 size_t slen;
2208 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2209 off_t off = *offp;
2210 int error;
2211
2212 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2213 wc->wc_log_dev_bshift) == len);
2214 if (off < wc->wc_circ_off)
2215 off = wc->wc_circ_off;
2216 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2217 if (slen < len) {
2218 error = wapbl_read(data, slen, wr->wr_devvp,
2219 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2220 if (error)
2221 return error;
2222 data = (uint8_t *)data + slen;
2223 len -= slen;
2224 off = wc->wc_circ_off;
2225 }
2226 error = wapbl_read(data, len, wr->wr_devvp,
2227 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2228 if (error)
2229 return error;
2230 off += len;
2231 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2232 off = wc->wc_circ_off;
2233 *offp = off;
2234 return 0;
2235 }
2236
2237 static void
2238 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2239 {
2240 size_t slen;
2241 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2242 off_t off = *offp;
2243
2244 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2245 wc->wc_log_dev_bshift) == len);
2246
2247 if (off < wc->wc_circ_off)
2248 off = wc->wc_circ_off;
2249 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2250 if (slen < len) {
2251 len -= slen;
2252 off = wc->wc_circ_off;
2253 }
2254 off += len;
2255 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2256 off = wc->wc_circ_off;
2257 *offp = off;
2258 }
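/*
 * Wrap-around example for the two routines above (hypothetical
 * geometry): with wc_circ_off = 1024 and wc_circ_size = 8192 the
 * usable log spans [1024, 9216).  Reading 2048 bytes starting at
 * off = 8192 splits into slen = 1024 bytes at the end of the log
 * followed by 1024 bytes from wc_circ_off, and returns *offp = 2048.
 */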
2259
2260 /****************************************************************/
2261
2262 int
2263 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2264 daddr_t off, size_t count, size_t blksize)
2265 {
2266 struct wapbl_replay *wr;
2267 int error;
2268 struct vnode *devvp;
2269 daddr_t logpbn;
2270 uint8_t *scratch;
2271 struct wapbl_wc_header *wch;
2272 struct wapbl_wc_header *wch2;
2273 /* Use this until we read the actual log header */
2274 int log_dev_bshift = DEV_BSHIFT;
2275 size_t used;
2276
2277 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2278 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2279 vp, off, count, blksize));
2280
2281 if (off < 0)
2282 return EINVAL;
2283
2284 if (blksize < DEV_BSIZE)
2285 return EINVAL;
2286 if (blksize % DEV_BSIZE)
2287 return EINVAL;
2288
2289 #ifdef _KERNEL
2290 #if 0
2291 /* XXX vp->v_size isn't reliably set for VBLK devices,
2292 * especially root. However, we might still want to verify
2293 * that the full load is readable */
2294 if ((off + count) * blksize > vp->v_size)
2295 return EINVAL;
2296 #endif
2297
2298 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2299 return error;
2300 }
2301 #else /* ! _KERNEL */
2302 devvp = vp;
2303 logpbn = off;
2304 #endif /* ! _KERNEL */
2305
2306 scratch = wapbl_malloc(MAXBSIZE);
2307
2308 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
2309 if (error)
2310 goto errout;
2311
2312 wch = (struct wapbl_wc_header *)scratch;
2313 wch2 =
2314 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2315 /* XXX verify checksums and magic numbers */
2316 if (wch->wc_type != WAPBL_WC_HEADER) {
2317 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2318 error = EFTYPE;
2319 goto errout;
2320 }
2321
2322 if (wch2->wc_generation > wch->wc_generation)
2323 wch = wch2;
2324
2325 wr = wapbl_calloc(1, sizeof(*wr));
2326
2327 wr->wr_logvp = vp;
2328 wr->wr_devvp = devvp;
2329 wr->wr_logpbn = logpbn;
2330
2331 wr->wr_scratch = scratch;
2332
2333 memcpy(&wr->wr_wc_header, wch, sizeof(wr->wr_wc_header));
2334
2335 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2336
2337 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2338 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2339 " len=%"PRId64" used=%zu\n",
2340 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2341 wch->wc_circ_size, used));
2342
2343 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2344 error = wapbl_replay_prescan(wr);
2345 if (error) {
2346 wapbl_replay_stop(wr);
2347 wapbl_replay_free(wr);
2348 return error;
2349 }
2350
2351 error = wapbl_replay_get_inodes(wr);
2352 if (error) {
2353 wapbl_replay_stop(wr);
2354 wapbl_replay_free(wr);
2355 return error;
2356 }
2357
2358 *wrp = wr;
2359 return 0;
2360
2361 errout:
2362 wapbl_free(scratch);
2363 return error;
2364 }
2365
2366 void
2367 wapbl_replay_stop(struct wapbl_replay *wr)
2368 {
2369
2370 if (!wapbl_replay_isopen(wr))
2371 return;
2372
2373 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2374
2375 wapbl_free(wr->wr_scratch);
2376 wr->wr_scratch = 0;
2377
2378 wr->wr_logvp = 0;
2379
2380 wapbl_blkhash_clear(wr);
2381 wapbl_blkhash_free(wr);
2382 }
2383
2384 void
2385 wapbl_replay_free(struct wapbl_replay *wr)
2386 {
2387
2388 KDASSERT(!wapbl_replay_isopen(wr));
2389
2390 if (wr->wr_inodes)
2391 wapbl_free(wr->wr_inodes);
2392 wapbl_free(wr);
2393 }
2394
2395 #ifdef _KERNEL
2396 int
2397 wapbl_replay_isopen1(struct wapbl_replay *wr)
2398 {
2399
2400 return wapbl_replay_isopen(wr);
2401 }
2402 #endif
2403
2404 static int
2405 wapbl_replay_prescan(struct wapbl_replay *wr)
2406 {
2407 off_t off;
2408 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2409 int error;
2410
2411 int logblklen = 1<<wch->wc_log_dev_bshift;
2412 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2413
2414 wapbl_blkhash_clear(wr);
2415
2416 off = wch->wc_tail;
2417 while (off != wch->wc_head) {
2418 struct wapbl_wc_null *wcn;
2419 off_t saveoff = off;
2420 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2421 if (error)
2422 goto errout;
2423 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2424 switch (wcn->wc_type) {
2425 case WAPBL_WC_BLOCKS:
2426 {
2427 struct wapbl_wc_blocklist *wc =
2428 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2429 int i;
2430 for (i = 0; i < wc->wc_blkcount; i++) {
2431 int j, n;
2432 /*
2433 * Enter each physical block into the
2434 * hashtable independently
2435 */
2436 n = wc->wc_blocks[i].wc_dlen >>
2437 wch->wc_fs_dev_bshift;
2438 for (j = 0; j < n; j++) {
2439 wapbl_blkhash_ins(wr,
2440 wc->wc_blocks[i].wc_daddr + j,
2441 off);
2442 wapbl_circ_advance(wr,
2443 fsblklen, &off);
2444 }
2445 }
2446 }
2447 break;
2448
2449 case WAPBL_WC_REVOCATIONS:
2450 {
2451 struct wapbl_wc_blocklist *wc =
2452 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2453 int i;
2454 for (i = 0; i < wc->wc_blkcount; i++) {
2455 int j, n;
2456 /*
2457 * Remove any blocks found from the
2458 * hashtable
2459 */
2460 n = wc->wc_blocks[i].wc_dlen >>
2461 wch->wc_fs_dev_bshift;
2462 for (j = 0; j < n; j++) {
2463 wapbl_blkhash_rem(wr,
2464 wc->wc_blocks[i].wc_daddr + j);
2465 }
2466 }
2467 }
2468 break;
2469
2470 case WAPBL_WC_INODES:
2471 {
2472 struct wapbl_wc_inodelist *wc =
2473 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2474 /*
2475 * Keep track of where we found this so we
2476 * can use it later
2477 */
2478 if (wc->wc_clear) {
2479 wr->wr_inodestail = saveoff;
2480 wr->wr_inodescnt = 0;
2481 }
2482 if (wr->wr_inodestail)
2483 wr->wr_inodeshead = off;
2484 wr->wr_inodescnt += wc->wc_inocnt;
2485 }
2486 break;
2487 default:
2488 printf("Unrecognized wapbl type: 0x%08x\n",
2489 wcn->wc_type);
2490 error = EFTYPE;
2491 goto errout;
2492 }
2493 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2494 if (off != saveoff) {
2495 printf("wapbl_replay: corrupted records\n");
2496 error = EFTYPE;
2497 goto errout;
2498 }
2499 }
2500 return 0;
2501
2502 errout:
2503 wapbl_blkhash_clear(wr);
2504 return error;
2505 }
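/*
 * Example of the prescan bookkeeping above (hypothetical record): a
 * WAPBL_WC_BLOCKS entry with wc_daddr = 100 and wc_dlen = 4 * fsblklen
 * inserts hash entries for blocks 100..103, each recording the log
 * offset of that block's own copy of the data.  A later
 * WAPBL_WC_REVOCATIONS entry naming those blocks removes the entries
 * again, so their stale copies are never replayed.
 */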
2506
2507 static int
2508 wapbl_replay_get_inodes(struct wapbl_replay *wr)
2509 {
2510 off_t off;
2511 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2512 int logblklen = 1<<wch->wc_log_dev_bshift;
2513 	int cnt = 0;
2514
2515 KDASSERT(wapbl_replay_isopen(wr));
2516
2517 if (wr->wr_inodescnt == 0)
2518 return 0;
2519
2520 KASSERT(!wr->wr_inodes);
2521
2522 wr->wr_inodes = wapbl_malloc(wr->wr_inodescnt*sizeof(wr->wr_inodes[0]));
2523
2524 off = wr->wr_inodestail;
2525
2526 while (off != wr->wr_inodeshead) {
2527 struct wapbl_wc_null *wcn;
2528 int error;
2529 off_t saveoff = off;
2530 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2531 if (error) {
2532 wapbl_free(wr->wr_inodes);
2533 wr->wr_inodes = 0;
2534 return error;
2535 }
2536 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2537 switch (wcn->wc_type) {
2538 case WAPBL_WC_BLOCKS:
2539 case WAPBL_WC_REVOCATIONS:
2540 break;
2541 case WAPBL_WC_INODES:
2542 {
2543 struct wapbl_wc_inodelist *wc =
2544 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2545 /*
2546 * Keep track of where we found this so we
2547 * can use it later
2548 */
2549 if (wc->wc_clear) {
2550 cnt = 0;
2551 }
2552 /* This memcpy assumes that wr_inodes is
2553 * laid out the same as wc_inodes. */
2554 memcpy(&wr->wr_inodes[cnt], wc->wc_inodes,
2555 wc->wc_inocnt*sizeof(wc->wc_inodes[0]));
2556 cnt += wc->wc_inocnt;
2557 }
2558 break;
2559 default:
2560 KASSERT(0);
2561 }
2562 off = saveoff;
2563 wapbl_circ_advance(wr, wcn->wc_len, &off);
2564 }
2565 KASSERT(cnt == wr->wr_inodescnt);
2566 return 0;
2567 }
2568
2569 #ifdef DEBUG
2570 int
2571 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2572 {
2573 off_t off;
2574 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2575 int mismatchcnt = 0;
2576 int logblklen = 1<<wch->wc_log_dev_bshift;
2577 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2578 void *scratch1 = wapbl_malloc(MAXBSIZE);
2579 void *scratch2 = wapbl_malloc(MAXBSIZE);
2580 int error = 0;
2581
2582 KDASSERT(wapbl_replay_isopen(wr));
2583
2584 off = wch->wc_tail;
2585 while (off != wch->wc_head) {
2586 struct wapbl_wc_null *wcn;
2587 #ifdef DEBUG
2588 off_t saveoff = off;
2589 #endif
2590 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2591 if (error)
2592 goto out;
2593 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2594 switch (wcn->wc_type) {
2595 case WAPBL_WC_BLOCKS:
2596 {
2597 struct wapbl_wc_blocklist *wc =
2598 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2599 int i;
2600 for (i = 0; i < wc->wc_blkcount; i++) {
2601 int foundcnt = 0;
2602 int dirtycnt = 0;
2603 int j, n;
2604 /*
2605 				 * Check each physical block against the
2606 * hashtable independently
2607 */
2608 n = wc->wc_blocks[i].wc_dlen >>
2609 wch->wc_fs_dev_bshift;
2610 for (j = 0; j < n; j++) {
2611 struct wapbl_blk *wb =
2612 wapbl_blkhash_get(wr,
2613 wc->wc_blocks[i].wc_daddr + j);
2614 if (wb && (wb->wb_off == off)) {
2615 foundcnt++;
2616 error =
2617 wapbl_circ_read(wr,
2618 scratch1, fsblklen,
2619 &off);
2620 if (error)
2621 goto out;
2622 error =
2623 wapbl_read(scratch2,
2624 fsblklen, fsdevvp,
2625 wb->wb_blk);
2626 if (error)
2627 goto out;
2628 if (memcmp(scratch1,
2629 scratch2,
2630 fsblklen)) {
2631 printf(
2632 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2633 wb->wb_blk, (intmax_t)off);
2634 dirtycnt++;
2635 mismatchcnt++;
2636 }
2637 } else {
2638 wapbl_circ_advance(wr,
2639 fsblklen, &off);
2640 }
2641 }
2642 #if 0
2643 /*
2644 * If all of the blocks in an entry
2645 * are clean, then remove all of its
2646 * blocks from the hashtable since they
2647 * never will need replay.
2648 */
2649 if ((foundcnt != 0) &&
2650 (dirtycnt == 0)) {
2651 off = saveoff;
2652 wapbl_circ_advance(wr,
2653 logblklen, &off);
2654 for (j = 0; j < n; j++) {
2655 struct wapbl_blk *wb =
2656 wapbl_blkhash_get(wr,
2657 wc->wc_blocks[i].wc_daddr + j);
2658 if (wb &&
2659 (wb->wb_off == off)) {
2660 wapbl_blkhash_rem(wr, wb->wb_blk);
2661 }
2662 wapbl_circ_advance(wr,
2663 fsblklen, &off);
2664 }
2665 }
2666 #endif
2667 }
2668 }
2669 break;
2670 case WAPBL_WC_REVOCATIONS:
2671 case WAPBL_WC_INODES:
2672 break;
2673 default:
2674 KASSERT(0);
2675 }
2676 #ifdef DEBUG
2677 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2678 KASSERT(off == saveoff);
2679 #endif
2680 }
2681 out:
2682 wapbl_free(scratch1);
2683 wapbl_free(scratch2);
2684 if (!error && mismatchcnt)
2685 error = EFTYPE;
2686 return error;
2687 }
2688 #endif
2689
2690 int
2691 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2692 {
2693 off_t off;
2694 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2695 int logblklen = 1<<wch->wc_log_dev_bshift;
2696 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2697 void *scratch1 = wapbl_malloc(MAXBSIZE);
2698 int error = 0;
2699
2700 KDASSERT(wapbl_replay_isopen(wr));
2701
2702 /*
2703 * This parses the journal for replay, although it could
2704 * just as easily walk the hashtable instead.
2705 */
2706
2707 off = wch->wc_tail;
2708 while (off != wch->wc_head) {
2709 struct wapbl_wc_null *wcn;
2710 #ifdef DEBUG
2711 off_t saveoff = off;
2712 #endif
2713 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2714 if (error)
2715 goto out;
2716 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2717 switch (wcn->wc_type) {
2718 case WAPBL_WC_BLOCKS:
2719 {
2720 struct wapbl_wc_blocklist *wc =
2721 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2722 int i;
2723 for (i = 0; i < wc->wc_blkcount; i++) {
2724 int j, n;
2725 /*
2726 * Check each physical block against
2727 * the hashtable independently
2728 */
2729 n = wc->wc_blocks[i].wc_dlen >>
2730 wch->wc_fs_dev_bshift;
2731 for (j = 0; j < n; j++) {
2732 struct wapbl_blk *wb =
2733 wapbl_blkhash_get(wr,
2734 wc->wc_blocks[i].wc_daddr + j);
2735 if (wb && (wb->wb_off == off)) {
2736 error = wapbl_circ_read(
2737 wr, scratch1,
2738 fsblklen, &off);
2739 if (error)
2740 goto out;
2741 error =
2742 wapbl_write(scratch1,
2743 fsblklen, fsdevvp,
2744 wb->wb_blk);
2745 if (error)
2746 goto out;
2747 } else {
2748 wapbl_circ_advance(wr,
2749 fsblklen, &off);
2750 }
2751 }
2752 }
2753 }
2754 break;
2755 case WAPBL_WC_REVOCATIONS:
2756 case WAPBL_WC_INODES:
2757 break;
2758 default:
2759 KASSERT(0);
2760 }
2761 #ifdef DEBUG
2762 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2763 KASSERT(off == saveoff);
2764 #endif
2765 }
2766 out:
2767 wapbl_free(scratch1);
2768 return error;
2769 }
2770
2771 int
2772 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2773 {
2774 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2775 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2776
2777 KDASSERT(wapbl_replay_isopen(wr));
2778
2779 KASSERT((len % fsblklen) == 0);
2780
2781 while (len != 0) {
2782 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2783 if (wb) {
2784 off_t off = wb->wb_off;
2785 int error;
2786 error = wapbl_circ_read(wr, data, fsblklen, &off);
2787 if (error)
2788 return error;
2789 }
2790 data = (uint8_t *)data + fsblklen;
2791 len -= fsblklen;
2792 blk++;
2793 }
2794 return 0;
2795 }
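/*
 * Overall replay usage (a sketch; error handling elided):
 *
 *	struct wapbl_replay *wr;
 *	error = wapbl_replay_start(&wr, logvp, off, count, blksize);
 *	if (error == 0) {
 *		error = wapbl_replay_write(wr, fsdevvp);
 *		wapbl_replay_stop(wr);
 *		wapbl_replay_free(wr);
 *	}
 *
 * While a replay is open, wapbl_replay_read() may instead be used to
 * overlay the journaled copy of each requested block onto the caller's
 * buffer, leaving blocks absent from the journal untouched.
 */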
2796