vfs_wapbl.c revision 1.1.2.4

1 /* $NetBSD: vfs_wapbl.c,v 1.1.2.4 2008/06/12 08:39:21 martin Exp $ */
2
3 /*-
4 * Copyright (c) 2003,2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * This implements file system independent write-ahead physical block logging (WAPBL).
34 */
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.1.2.4 2008/06/12 08:39:21 martin Exp $");
37
38 #include <sys/param.h>
39
40 #ifdef _KERNEL
41 #include <sys/param.h>
42 #include <sys/namei.h>
43 #include <sys/proc.h>
44 #include <sys/uio.h>
45 #include <sys/vnode.h>
46 #include <sys/file.h>
47 #include <sys/malloc.h>
48 #include <sys/resourcevar.h>
49 #include <sys/conf.h>
50 #include <sys/mount.h>
51 #include <sys/kernel.h>
52 #include <sys/kauth.h>
53 #include <sys/mutex.h>
54 #include <sys/wapbl.h>
55
56 #if WAPBL_UVM_ALLOC
57 #include <uvm/uvm.h>
58 #endif
59
60 #include <miscfs/specfs/specdev.h>
61
62 MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
63 #define wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
64 #define wapbl_free(a) free((a), M_WAPBL)
65 #define wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO)
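/*
 * Note (descriptive, not from the original source): in the kernel these
 * wrappers map onto malloc(9) with M_WAITOK, so they sleep rather than
 * fail; the unchecked (n)*(s) product in wapbl_calloc assumes callers
 * pass sizes that cannot overflow.
 */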
66
67 #else /* !_KERNEL */
68 #include <assert.h>
69 #include <errno.h>
70 #include <stdio.h>
71 #include <stdbool.h>
72 #include <stdlib.h>
73 #include <string.h>
74
75 #include <sys/time.h>
76 #include <sys/wapbl.h>
77
78 #define KDASSERT(x) assert(x)
79 #define KASSERT(x) assert(x)
80 #define wapbl_malloc(s) malloc(s)
81 #define wapbl_free(a) free(a)
82 #define wapbl_calloc(n, s) calloc((n), (s))
83
84 #endif /* !_KERNEL */
85
86 /*
87 * INTERNAL DATA STRUCTURES
88 */
89
90 /*
91 * This structure holds per-mount log information.
92 *
93 * Legend: a = atomic access only
94 * r = read-only after init
95 * l = rwlock held
96 * m = mutex held
97 * u = unlocked access ok
98 * b = bufcache_lock held
99 */
100 struct wapbl {
101 struct vnode *wl_logvp; /* r: log here */
102 struct vnode *wl_devvp; /* r: log on this device */
103 struct mount *wl_mount; /* r: mountpoint wl is associated with */
104 daddr_t wl_logpbn; /* r: Physical block number of start of log */
105 int wl_log_dev_bshift; /* r: logarithm of device block size of log
106 device */
107 int wl_fs_dev_bshift; /* r: logarithm of device block size of
108 filesystem device */
109
110 unsigned wl_lock_count; /* a: Count of transactions in progress */
111
112 size_t wl_circ_size; /* r: Number of bytes in buffer of log */
113 size_t wl_circ_off; /* r: Number of bytes reserved at start */
114
115 size_t wl_bufcount_max; /* r: Number of buffers reserved for log */
116 size_t wl_bufbytes_max; /* r: Number of buf bytes reserved for log */
117
118 off_t wl_head; /* l: Byte offset of log head */
119 off_t wl_tail; /* l: Byte offset of log tail */
120 /*
121 * head == tail == 0 means log is empty
122 * head == tail != 0 means log is full
123 * see assertions in wapbl_advance() for other boundary conditions.
124 * Only truncate moves the tail, except when flush sets it to
125 * wl_header_size.  Only flush moves the head, except when truncate
126 * sets it to 0.
127 */
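/*
 * Worked example (offsets assumed for illustration): with
 * wl_circ_off = 1024 and wl_circ_size = 8192, an empty log has
 * head == tail == 0.  Flushing 512 bytes gives head = 1536 and
 * tail = 1024; truncating those 512 bytes would make head == tail,
 * i.e. "full", so wapbl_advance_tail() resets both to 0 instead.
 */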
128
129 struct wapbl_wc_header *wl_wc_header; /* l */
130 void *wl_wc_scratch; /* l: scratch space (XXX: why?!?) */
131
132 kmutex_t wl_mtx; /* u: short-term lock */
133 krwlock_t wl_rwlock; /* u: File system transaction lock */
134
135 /*
136 * The above locks must be held while accessing
137 * wl_bufcount or wl_bufs or head or tail
138 */
139
140 /*
141 * Callback called from within the flush routine to flush any extra
142 * bits. Note that flush may be skipped without calling this if
143 * there are no outstanding buffers in the transaction.
144 */
145 wapbl_flush_fn_t wl_flush; /* r */
146 wapbl_flush_fn_t wl_flush_abort;/* r */
147
148 size_t wl_bufbytes; /* m: Byte count of pages in wl_bufs */
149 size_t wl_bufcount; /* m: Count of buffers in wl_bufs */
150 size_t wl_bcount; /* m: Total bcount of wl_bufs */
151
152 LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */
153
154 kcondvar_t wl_reclaimable_cv; /* m (obviously) */
155 size_t wl_reclaimable_bytes; /* m: Amount of space available for
156 reclamation by truncate */
157 int wl_error_count; /* m: # of wl_entries with errors */
158 size_t wl_reserved_bytes; /* never truncate log smaller than this */
159
160 #ifdef WAPBL_DEBUG_BUFBYTES
161 size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
162 #endif
163
164 daddr_t *wl_deallocblks;/* l: address of block */
165 int *wl_dealloclens; /* l: size of block (fragments, remember) */
166 int wl_dealloccnt; /* l: total count */
167 int wl_dealloclim; /* l: max count */
168
169 /* hashtable of inode numbers for allocated but unlinked inodes */
170 /* synch ??? */
171 LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
172 u_long wl_inohashmask;
173 int wl_inohashcnt;
174
175 SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
176 accounting */
177 };
178
179 #ifdef WAPBL_DEBUG_PRINT
180 int wapbl_debug_print = WAPBL_DEBUG_PRINT;
181 #endif
182
183 /****************************************************************/
184 #ifdef _KERNEL
185
186 #ifdef WAPBL_DEBUG
187 struct wapbl *wapbl_debug_wl;
188 #endif
189
190 static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
191 static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
192 static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
193 static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
194 #endif /* _KERNEL */
195
196 static int wapbl_replay_prescan(struct wapbl_replay *wr);
197 static int wapbl_replay_get_inodes(struct wapbl_replay *wr);
198
199 static __inline size_t wapbl_space_free(size_t avail, off_t head,
200 off_t tail);
201 static __inline size_t wapbl_space_used(size_t avail, off_t head,
202 off_t tail);
203
204 #ifdef _KERNEL
205
206 #define WAPBL_INODETRK_SIZE 83
207 static int wapbl_ino_pool_refcount;
208 static struct pool wapbl_ino_pool;
209 struct wapbl_ino {
210 LIST_ENTRY(wapbl_ino) wi_hash;
211 ino_t wi_ino;
212 mode_t wi_mode;
213 };
214
215 static kmutex_t wapbl_global_mtx;
216
217 static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
218 static void wapbl_inodetrk_free(struct wapbl *wl);
219 static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);
220
221 static size_t wapbl_transaction_len(struct wapbl *wl);
222 static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);
223
224 /*
225 * This is useful for debugging. If set, the log will
226 * only be truncated when necessary.
227 */
228 int wapbl_lazy_truncate = 0;
229
230 struct wapbl_ops wapbl_ops = {
231 .wo_wapbl_discard = wapbl_discard,
232 .wo_wapbl_replay_isopen = wapbl_replay_isopen1,
233 .wo_wapbl_replay_read = wapbl_replay_read,
234 .wo_wapbl_add_buf = wapbl_add_buf,
235 .wo_wapbl_remove_buf = wapbl_remove_buf,
236 .wo_wapbl_resize_buf = wapbl_resize_buf,
237 .wo_wapbl_begin = wapbl_begin,
238 .wo_wapbl_end = wapbl_end,
239 .wo_wapbl_junlock_assert= wapbl_junlock_assert,
240
241 /* XXX: the following is only used to say "this is a wapbl buf" */
242 .wo_wapbl_biodone = wapbl_biodone,
243 };
244
245 void
246 wapbl_init(void)
247 {
248
249 mutex_init(&wapbl_global_mtx, MUTEX_DEFAULT, IPL_NONE);
250 malloc_type_attach(M_WAPBL);
251 }
252
253 int
254 wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
255 daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
256 wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
257 {
258 struct wapbl *wl;
259 struct vnode *devvp;
260 daddr_t logpbn;
261 int error;
262 int log_dev_bshift = DEV_BSHIFT;
263 int fs_dev_bshift = DEV_BSHIFT;
264 int run;
265
266 WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
267 " count=%zu blksize=%zu\n", vp, off, count, blksize));
268
269 if (log_dev_bshift > fs_dev_bshift) {
270 WAPBL_PRINTF(WAPBL_PRINT_OPEN,
271 ("wapbl: log device's block size cannot be larger "
272 "than filesystem's\n"));
273 /*
274 * Not currently implemented, although it could be if
275 * needed someday.
276 */
277 return ENOSYS;
278 }
279
280 if (off < 0)
281 return EINVAL;
282
283 if (blksize < DEV_BSIZE)
284 return EINVAL;
285 if (blksize % DEV_BSIZE)
286 return EINVAL;
287
288 /* XXXTODO: verify that the full load is writable */
289
290 /*
291 * XXX check for minimum log size
292 * minimum is governed by minimum amount of space
293 * to complete a transaction. (probably truncate)
294 */
295 /* XXX for now pick something minimal */
296 if ((count * blksize) < MAXPHYS) {
297 return ENOSPC;
298 }
299
300 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
301 return error;
302 }
303
304 wl = wapbl_calloc(1, sizeof(*wl));
305 rw_init(&wl->wl_rwlock);
306 mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
307 cv_init(&wl->wl_reclaimable_cv, "wapblrec");
308 LIST_INIT(&wl->wl_bufs);
309 SIMPLEQ_INIT(&wl->wl_entries);
310
311 wl->wl_logvp = vp;
312 wl->wl_devvp = devvp;
313 wl->wl_mount = mp;
314 wl->wl_logpbn = logpbn;
315 wl->wl_log_dev_bshift = log_dev_bshift;
316 wl->wl_fs_dev_bshift = fs_dev_bshift;
317
318 wl->wl_flush = flushfn;
319 wl->wl_flush_abort = flushabortfn;
320
321 /* Reserve two log device blocks for the commit headers */
322 wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
323 wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
324 /* truncate the log usage to a multiple of 1<<wl_log_dev_bshift */
325 wl->wl_circ_size >>= wl->wl_log_dev_bshift;
326 wl->wl_circ_size <<= wl->wl_log_dev_bshift;
327
328 /*
329 * wl_bufbytes_max limits the size of the in memory transaction space.
330 * - Since buffers are allocated and accounted for in units of
331 * PAGE_SIZE it is required to be a multiple of PAGE_SIZE
332 * (i.e. 1<<PAGE_SHIFT)
333 * - Since the log device has to be written in units of
334 * 1<<wl_log_dev_bshift it is required to be a multiple of
335 * 1<<wl_log_dev_bshift.
336 * - Since the filesystem will provide data in units of 1<<wl_fs_dev_bshift,
337 * it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
338 * Therefore it must be multiple of the least common multiple of those
339 * three quantities. Fortunately, all of those quantities are
340 * guaranteed to be a power of two, and the least common multiple of
341 * a set of numbers which are all powers of two is simply the maximum
342 * of those numbers. Finally, the maximum logarithm of a power of two
343 * is the same as the log of the maximum power of two. So we can do
344 * the following operations to size wl_bufbytes_max:
345 */
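/*
 * Worked example (values assumed for illustration): with
 * PAGE_SHIFT = 12 and both bshift values equal to 9, the largest
 * power-of-two constraint is PAGE_SIZE, so the shift pairs below
 * round wl_bufbytes_max down to a multiple of 4096,
 * e.g. 1234567 bytes -> 1232896 bytes (301 pages).
 */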
346
347 /* XXX fix actual number of pages reserved per filesystem. */
348 wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);
349
350 /* Round wl_bufbytes_max down to a multiple of the largest power-of-two constraint */
351 wl->wl_bufbytes_max >>= PAGE_SHIFT;
352 wl->wl_bufbytes_max <<= PAGE_SHIFT;
353 wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
354 wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
355 wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
356 wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
357
358 /* XXX maybe use filesystem fragment size instead of 1024 */
359 /* XXX fix actual number of buffers reserved per filesystem. */
360 wl->wl_bufcount_max = (nbuf / 2) * 1024;
361
362 /* XXX tie this into resource estimation */
363 wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);
364
365 #if WAPBL_UVM_ALLOC
366 wl->wl_deallocblks = (void *) uvm_km_zalloc(kernel_map,
367 round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
368 KASSERT(wl->wl_deallocblks != NULL);
369 wl->wl_dealloclens = (void *) uvm_km_zalloc(kernel_map,
370 round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
371 KASSERT(wl->wl_dealloclens != NULL);
372 #else
373 wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
374 wl->wl_dealloclim);
375 wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
376 wl->wl_dealloclim);
377 #endif
378
379 wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);
380
381 /* Initialize the commit header */
382 {
383 struct wapbl_wc_header *wc;
384 size_t len = 1<<wl->wl_log_dev_bshift;
385 wc = wapbl_calloc(1, len);
386 wc->wc_type = WAPBL_WC_HEADER;
387 wc->wc_len = len;
388 wc->wc_circ_off = wl->wl_circ_off;
389 wc->wc_circ_size = wl->wl_circ_size;
390 /* XXX wc->wc_fsid */
391 wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
392 wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
393 wl->wl_wc_header = wc;
394 wl->wl_wc_scratch = wapbl_malloc(len);
395 }
396
397 /*
398 * if there was an existing set of unlinked but
399 * allocated inodes, preserve it in the new
400 * log.
401 */
402 if (wr && wr->wr_inodescnt) {
403 int i;
404
405 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
406 ("wapbl_start: reusing log with %d inodes\n",
407 wr->wr_inodescnt));
408
409 /*
410 * It's only valid to reuse the replay log if it's
411 * the same as the new log we just opened.
412 */
413 KDASSERT(!wapbl_replay_isopen(wr));
414 KASSERT(devvp->v_rdev == wr->wr_devvp->v_rdev);
415 KASSERT(logpbn == wr->wr_logpbn);
416 KASSERT(wl->wl_circ_size == wr->wr_wc_header.wc_circ_size);
417 KASSERT(wl->wl_circ_off == wr->wr_wc_header.wc_circ_off);
418 KASSERT(wl->wl_log_dev_bshift ==
419 wr->wr_wc_header.wc_log_dev_bshift);
420 KASSERT(wl->wl_fs_dev_bshift ==
421 wr->wr_wc_header.wc_fs_dev_bshift);
422
423 wl->wl_wc_header->wc_generation =
424 wr->wr_wc_header.wc_generation + 1;
425
426 for (i = 0; i < wr->wr_inodescnt; i++)
427 wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
428 wr->wr_inodes[i].wr_imode);
429
430 /* Make sure new transaction won't overwrite old inodes list */
431 KDASSERT(wapbl_transaction_len(wl) <=
432 wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
433 wr->wr_inodestail));
434
435 wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
436 wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
437 wapbl_transaction_len(wl);
438
439 error = wapbl_write_inodes(wl, &wl->wl_head);
440 if (error)
441 goto errout;
442
443 KASSERT(wl->wl_head != wl->wl_tail);
444 KASSERT(wl->wl_head != 0);
445 }
446
447 error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
448 if (error) {
449 goto errout;
450 }
451
452 *wlp = wl;
453 #if defined(WAPBL_DEBUG)
454 wapbl_debug_wl = wl;
455 #endif
456
457 return 0;
458 errout:
459 wapbl_discard(wl);
460 wapbl_free(wl->wl_wc_scratch);
461 wapbl_free(wl->wl_wc_header);
462 #if WAPBL_UVM_ALLOC
463 uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
464 round_page(sizeof(*wl->wl_deallocblks) *
465 wl->wl_dealloclim));
466 uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
467 round_page(sizeof(*wl->wl_dealloclens) *
468 wl->wl_dealloclim));
469 #else
470 wapbl_free(wl->wl_deallocblks);
471 wapbl_free(wl->wl_dealloclens);
472 #endif
473 wapbl_inodetrk_free(wl);
474 wapbl_free(wl);
475
476 return error;
477 }
478
479 /*
480 * Like wapbl_flush(), but discards the transaction
481 * completely.
482 */
483
484 void
485 wapbl_discard(struct wapbl *wl)
486 {
487 struct wapbl_entry *we;
488 struct buf *bp;
489 int i;
490
491 /*
492 * XXX we may consider using upgrade here
493 * if we want to call flush from inside a transaction
494 */
495 rw_enter(&wl->wl_rwlock, RW_WRITER);
496 wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
497 wl->wl_dealloccnt);
498
499 #ifdef WAPBL_DEBUG_PRINT
500 {
501 struct wapbl_entry *we;
502 pid_t pid = -1;
503 lwpid_t lid = -1;
504 if (curproc)
505 pid = curproc->p_pid;
506 if (curlwp)
507 lid = curlwp->l_lid;
508 #ifdef WAPBL_DEBUG_BUFBYTES
509 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
510 ("wapbl_discard: thread %d.%d discarding "
511 "transaction\n"
512 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
513 "deallocs=%d inodes=%d\n"
514 "\terrcnt = %u, reclaimable=%zu reserved=%zu "
515 "unsynced=%zu\n",
516 pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
517 wl->wl_bcount, wl->wl_dealloccnt,
518 wl->wl_inohashcnt, wl->wl_error_count,
519 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
520 wl->wl_unsynced_bufbytes));
521 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
522 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
523 ("\tentry: bufcount = %zu, reclaimable = %zu, "
524 "error = %d, unsynced = %zu\n",
525 we->we_bufcount, we->we_reclaimable_bytes,
526 we->we_error, we->we_unsynced_bufbytes));
527 }
528 #else /* !WAPBL_DEBUG_BUFBYTES */
529 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
530 ("wapbl_discard: thread %d.%d discarding transaction\n"
531 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
532 "deallocs=%d inodes=%d\n"
533 "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
534 pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
535 wl->wl_bcount, wl->wl_dealloccnt,
536 wl->wl_inohashcnt, wl->wl_error_count,
537 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
538 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
539 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
540 ("\tentry: bufcount = %zu, reclaimable = %zu, "
541 "error = %d\n",
542 we->we_bufcount, we->we_reclaimable_bytes,
543 we->we_error));
544 }
545 #endif /* !WAPBL_DEBUG_BUFBYTES */
546 }
547 #endif /* WAPBL_DEBUG_PRINT */
548
549 for (i = 0; i <= wl->wl_inohashmask; i++) {
550 struct wapbl_ino_head *wih;
551 struct wapbl_ino *wi;
552
553 wih = &wl->wl_inohash[i];
554 while ((wi = LIST_FIRST(wih)) != NULL) {
555 LIST_REMOVE(wi, wi_hash);
556 pool_put(&wapbl_ino_pool, wi);
557 KASSERT(wl->wl_inohashcnt > 0);
558 wl->wl_inohashcnt--;
559 }
560 }
561
562 /*
563 * clean buffer list
564 */
565 mutex_enter(&bufcache_lock);
566 mutex_enter(&wl->wl_mtx);
567 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
568 if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
569 /*
570 * The buffer will be unlocked and
571 * removed from the transaction in brelse
572 */
573 mutex_exit(&wl->wl_mtx);
574 brelsel(bp, 0);
575 mutex_enter(&wl->wl_mtx);
576 }
577 }
578 mutex_exit(&wl->wl_mtx);
579 mutex_exit(&bufcache_lock);
580
581 /*
582 * Remove references to this wl from wl_entries, free any which
583 * no longer have buffers, others will be freed in wapbl_biodone
584 * when they no longer have any buffers.
585 */
586 while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
587 SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
588 /* XXX should we be accumulating wl_error_count
589 * and increasing reclaimable bytes ? */
590 we->we_wapbl = NULL;
591 if (we->we_bufcount == 0) {
592 #ifdef WAPBL_DEBUG_BUFBYTES
593 KASSERT(we->we_unsynced_bufbytes == 0);
594 #endif
595 wapbl_free(we);
596 }
597 }
598
599 /* Discard list of deallocs */
600 wl->wl_dealloccnt = 0;
601 /* XXX should we clear wl_reserved_bytes? */
602
603 KASSERT(wl->wl_bufbytes == 0);
604 KASSERT(wl->wl_bcount == 0);
605 KASSERT(wl->wl_bufcount == 0);
606 KASSERT(LIST_EMPTY(&wl->wl_bufs));
607 KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
608 KASSERT(wl->wl_inohashcnt == 0);
609
610 rw_exit(&wl->wl_rwlock);
611 }
612
613 int
614 wapbl_stop(struct wapbl *wl, int force)
615 {
616 struct vnode *vp;
617 int error;
618
619 WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
620 error = wapbl_flush(wl, 1);
621 if (error) {
622 if (force)
623 wapbl_discard(wl);
624 else
625 return error;
626 }
627
628 /* Unlinked inodes persist after a flush */
629 if (wl->wl_inohashcnt) {
630 if (force) {
631 wapbl_discard(wl);
632 } else {
633 return EBUSY;
634 }
635 }
636
637 KASSERT(wl->wl_bufbytes == 0);
638 KASSERT(wl->wl_bcount == 0);
639 KASSERT(wl->wl_bufcount == 0);
640 KASSERT(LIST_EMPTY(&wl->wl_bufs));
641 KASSERT(wl->wl_dealloccnt == 0);
642 KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
643 KASSERT(wl->wl_inohashcnt == 0);
644
645 vp = wl->wl_logvp;
646
647 wapbl_free(wl->wl_wc_scratch);
648 wapbl_free(wl->wl_wc_header);
649 #if WAPBL_UVM_ALLOC
650 uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
651 round_page(sizeof(*wl->wl_deallocblks) *
652 wl->wl_dealloclim));
653 uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
654 round_page(sizeof(*wl->wl_dealloclens) *
655 wl->wl_dealloclim));
656 #else
657 wapbl_free(wl->wl_deallocblks);
658 wapbl_free(wl->wl_dealloclens);
659 #endif
660 wapbl_inodetrk_free(wl);
661 wapbl_free(wl);
662
663 return 0;
664 }
665
666 static int
667 wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
668 {
669 struct pstats *pstats = curlwp->l_proc->p_stats;
670 struct buf *bp;
671 int error;
672
673 KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
674 KASSERT(devvp->v_type == VBLK);
675
676 if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
677 devvp->v_numoutput++;
678 pstats->p_ru.ru_oublock++;
679 } else {
680 pstats->p_ru.ru_inblock++;
681 }
682
683 bp = getiobuf(devvp, true);
684 bp->b_flags = flags;
685 bp->b_cflags = BC_BUSY; /* silly & dubious */
686 bp->b_dev = devvp->v_rdev;
687 bp->b_data = data;
688 bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
689 bp->b_blkno = pbn;
690
691 WAPBL_PRINTF(WAPBL_PRINT_IO,
692 ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
693 BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
694 bp->b_blkno, bp->b_dev));
695
696 VOP_STRATEGY(devvp, bp);
697
698 error = biowait(bp);
699 putiobuf(bp);
700
701 if (error) {
702 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
703 ("wapbl_doio: %s %zu bytes at block %" PRId64
704 " on dev 0x%x failed with error %d\n",
705 (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
706 "write" : "read"),
707 len, pbn, devvp->v_rdev, error));
708 }
709
710 return error;
711 }
712
713 int
714 wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
715 {
716
717 return wapbl_doio(data, len, devvp, pbn, B_WRITE);
718 }
719
720 int
721 wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
722 {
723
724 return wapbl_doio(data, len, devvp, pbn, B_READ);
725 }
726
727 /*
728 * off is a byte offset; on success *offp is updated to the offset
729 * for the next write.  Handles log wraparound.
730 */
731 static int
732 wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
733 {
734 size_t slen;
735 off_t off = *offp;
736 int error;
737
738 KDASSERT(((len >> wl->wl_log_dev_bshift) <<
739 wl->wl_log_dev_bshift) == len);
740
741 if (off < wl->wl_circ_off)
742 off = wl->wl_circ_off;
743 slen = wl->wl_circ_off + wl->wl_circ_size - off;
744 if (slen < len) {
745 error = wapbl_write(data, slen, wl->wl_devvp,
746 wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
747 if (error)
748 return error;
749 data = (uint8_t *)data + slen;
750 len -= slen;
751 off = wl->wl_circ_off;
752 }
753 error = wapbl_write(data, len, wl->wl_devvp,
754 wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
755 if (error)
756 return error;
757 off += len;
758 if (off >= wl->wl_circ_off + wl->wl_circ_size)
759 off = wl->wl_circ_off;
760 *offp = off;
761 return 0;
762 }
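/*
 * Worked example (numbers assumed): with wl_circ_off = 1024 and
 * wl_circ_size = 8192, a 1024-byte write starting at off = 8704 has
 * only slen = 1024 + 8192 - 8704 = 512 bytes left before the end of
 * the circular area, so 512 bytes are written at 8704, the remaining
 * 512 at 1024, and *offp becomes 1536.
 */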
763
764 /****************************************************************/
765
766 int
767 wapbl_begin(struct wapbl *wl, const char *file, int line)
768 {
769 int doflush;
770 unsigned lockcount;
771 krw_t op;
772
773 KDASSERT(wl);
774
775 #ifdef WAPBL_DEBUG_SERIALIZE
776 op = RW_WRITER;
777 #else
778 op = RW_READER;
779 #endif
780
781 /*
782 * XXX this needs to be made much more sophisticated.
783 * perhaps each wapbl_begin could reserve a specified
784 * number of buffers and bytes.
785 */
786 mutex_enter(&wl->wl_mtx);
787 lockcount = wl->wl_lock_count;
788 doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
789 wl->wl_bufbytes_max / 2) ||
790 ((wl->wl_bufcount + (lockcount * 10)) >
791 wl->wl_bufcount_max / 2) ||
792 (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
793 mutex_exit(&wl->wl_mtx);
794
795 if (doflush) {
796 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
797 ("force flush lockcnt=%d bufbytes=%zu "
798 "(max=%zu) bufcount=%zu (max=%zu)\n",
799 lockcount, wl->wl_bufbytes,
800 wl->wl_bufbytes_max, wl->wl_bufcount,
801 wl->wl_bufcount_max));
802 }
803
804 if (doflush) {
805 int error = wapbl_flush(wl, 0);
806 if (error)
807 return error;
808 }
809
810 rw_enter(&wl->wl_rwlock, op);
811 mutex_enter(&wl->wl_mtx);
812 wl->wl_lock_count++;
813 mutex_exit(&wl->wl_mtx);
814
815 #if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
816 WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
817 ("wapbl_begin thread %d.%d with bufcount=%zu "
818 "bufbytes=%zu bcount=%zu at %s:%d\n",
819 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
820 wl->wl_bufbytes, wl->wl_bcount, file, line));
821 #endif
822
823 return 0;
824 }
825
826 void
827 wapbl_end(struct wapbl *wl)
828 {
829
830 #if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
831 WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
832 ("wapbl_end thread %d.%d with bufcount=%zu "
833 "bufbytes=%zu bcount=%zu\n",
834 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
835 wl->wl_bufbytes, wl->wl_bcount));
836 #endif
837
838 mutex_enter(&wl->wl_mtx);
839 KASSERT(wl->wl_lock_count > 0);
840 wl->wl_lock_count--;
841 mutex_exit(&wl->wl_mtx);
842
843 rw_exit(&wl->wl_rwlock);
844 }
845
846 void
847 wapbl_add_buf(struct wapbl *wl, struct buf * bp)
848 {
849
850 KASSERT(bp->b_cflags & BC_BUSY);
851 KASSERT(bp->b_vp);
852
853 wapbl_jlock_assert(wl);
854
855 #if 0
856 /*
857 * XXX this might be an issue for swapfiles.
858 * see uvm_swap.c:1702
859 *
860 * XXX2 why require it then? leap of semantics?
861 */
862 KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
863 #endif
864
865 mutex_enter(&wl->wl_mtx);
866 if (bp->b_flags & B_LOCKED) {
867 LIST_REMOVE(bp, b_wapbllist);
868 WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
869 ("wapbl_add_buf thread %d.%d re-adding buf %p "
870 "with %d bytes %d bcount\n",
871 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
872 bp->b_bcount));
873 } else {
874 /* unlocked but dirty buffers shouldn't exist */
875 KASSERT(!(bp->b_oflags & BO_DELWRI));
876 wl->wl_bufbytes += bp->b_bufsize;
877 wl->wl_bcount += bp->b_bcount;
878 wl->wl_bufcount++;
879 WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
880 ("wapbl_add_buf thread %d.%d adding buf %p "
881 "with %d bytes %d bcount\n",
882 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
883 bp->b_bcount));
884 }
885 LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
886 mutex_exit(&wl->wl_mtx);
887
888 bp->b_flags |= B_LOCKED;
889 }
890
891 static void
892 wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
893 {
894
895 KASSERT(mutex_owned(&wl->wl_mtx));
896 KASSERT(bp->b_cflags & BC_BUSY);
897 wapbl_jlock_assert(wl);
898
899 #if 0
900 /*
901 * XXX this might be an issue for swapfiles.
902 * see uvm_swap.c:1725
903 *
904 * XXXdeux: see above
905 */
906 KASSERT((bp->b_flags & BC_NOCACHE) == 0);
907 #endif
908 KASSERT(bp->b_flags & B_LOCKED);
909
910 WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
911 ("wapbl_remove_buf thread %d.%d removing buf %p with "
912 "%d bytes %d bcount\n",
913 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));
914
915 KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
916 wl->wl_bufbytes -= bp->b_bufsize;
917 KASSERT(wl->wl_bcount >= bp->b_bcount);
918 wl->wl_bcount -= bp->b_bcount;
919 KASSERT(wl->wl_bufcount > 0);
920 wl->wl_bufcount--;
921 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
922 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
923 LIST_REMOVE(bp, b_wapbllist);
924
925 bp->b_flags &= ~B_LOCKED;
926 }
927
928 /* called from brelsel() in vfs_bio among other places */
929 void
930 wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
931 {
932
933 mutex_enter(&wl->wl_mtx);
934 wapbl_remove_buf_locked(wl, bp);
935 mutex_exit(&wl->wl_mtx);
936 }
937
938 void
939 wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
940 {
941
942 KASSERT(bp->b_cflags & BC_BUSY);
943
944 /*
945 * XXX: why does this depend on B_LOCKED? otherwise the buf
946 * is not for a transaction? if so, why is this called in the
947 * first place?
948 */
949 if (bp->b_flags & B_LOCKED) {
950 mutex_enter(&wl->wl_mtx);
951 wl->wl_bufbytes += bp->b_bufsize - oldsz;
952 wl->wl_bcount += bp->b_bcount - oldcnt;
953 mutex_exit(&wl->wl_mtx);
954 }
955 }
956
957 #endif /* _KERNEL */
958
959 /****************************************************************/
960 /* Some utility inlines */
961
962 /* Advance the circular-log pointer old by delta, wrapping within [off, off+size) */
963 static __inline off_t
964 wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
965 {
966 off_t new;
967
968 /* Define acceptable ranges for inputs. */
969 KASSERT(delta <= size);
970 KASSERT((old == 0) || (old >= off));
971 KASSERT(old < (size + off));
972
973 if ((old == 0) && (delta != 0))
974 new = off + delta;
975 else if ((old + delta) < (size + off))
976 new = old + delta;
977 else
978 new = (old + delta) - size;
979
980 /* Note some interesting axioms */
981 KASSERT((delta != 0) || (new == old));
982 KASSERT((delta == 0) || (new != 0));
983 KASSERT((delta != (size)) || (new == old));
984
985 /* Define acceptable ranges for output. */
986 KASSERT((new == 0) || (new >= off));
987 KASSERT(new < (size + off));
988 return new;
989 }
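/*
 * Worked example (numbers assumed): with size = 8192 and off = 1024,
 * wapbl_advance(8192, 1024, 0, 512) returns 1536 (an empty pointer
 * skips the reserved header area), while
 * wapbl_advance(8192, 1024, 8704, 1024) wraps to 9728 - 8192 = 1536.
 */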
990
991 static __inline size_t
992 wapbl_space_used(size_t avail, off_t head, off_t tail)
993 {
994
995 if (tail == 0) {
996 KASSERT(head == 0);
997 return 0;
998 }
999 return ((head + (avail - 1) - tail) % avail) + 1;
1000 }
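/*
 * Worked example (numbers assumed): with avail = 8192, head = 1536
 * and tail = 1024, used = ((1536 + 8191 - 1024) % 8192) + 1 = 512.
 * The formula also yields avail when head == tail != 0 (a full log).
 */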
1001
1002 static __inline size_t
1003 wapbl_space_free(size_t avail, off_t head, off_t tail)
1004 {
1005
1006 return avail - wapbl_space_used(avail, head, tail);
1007 }
1008
1009 static __inline void
1010 wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
1011 off_t *tailp)
1012 {
1013 off_t head = *headp;
1014 off_t tail = *tailp;
1015
1016 KASSERT(delta <= wapbl_space_free(size, head, tail));
1017 head = wapbl_advance(size, off, head, delta);
1018 if ((tail == 0) && (head != 0))
1019 tail = off;
1020 *headp = head;
1021 *tailp = tail;
1022 }
1023
1024 static __inline void
1025 wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
1026 off_t *tailp)
1027 {
1028 off_t head = *headp;
1029 off_t tail = *tailp;
1030
1031 KASSERT(delta <= wapbl_space_used(size, head, tail));
1032 tail = wapbl_advance(size, off, tail, delta);
1033 if (head == tail) {
1034 head = tail = 0;
1035 }
1036 *headp = head;
1037 *tailp = tail;
1038 }
1039
1040 #ifdef _KERNEL
1041
1042 /****************************************************************/
1043
1044 /*
1045 * Remove transactions whose buffers are completely flushed to disk.
1046 * Will block until at least minfree space is available.
1047 * Only intended to be called from inside wapbl_flush and therefore
1048 * does not protect against commit races with itself or with flush.
1049 */
1050 static int
1051 wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
1052 {
1053 size_t delta;
1054 size_t avail;
1055 off_t head;
1056 off_t tail;
1057 int error = 0;
1058
1059 KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
1060 KASSERT(rw_write_held(&wl->wl_rwlock));
1061
1062 mutex_enter(&wl->wl_mtx);
1063
1064 /*
1065 * First check to see if we have to do a commit
1066 * at all.
1067 */
1068 avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
1069 if (minfree < avail) {
1070 mutex_exit(&wl->wl_mtx);
1071 return 0;
1072 }
1073 minfree -= avail;
1074 while ((wl->wl_error_count == 0) &&
1075 (wl->wl_reclaimable_bytes < minfree)) {
1076 WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1077 ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
1078 "minfree=%zd\n",
1079 &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
1080 minfree));
1081
1082 cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
1083 }
1084 if (wl->wl_reclaimable_bytes < minfree) {
1085 KASSERT(wl->wl_error_count);
1086 /* XXX maybe get actual error from buffer instead someday? */
1087 error = EIO;
1088 }
1089 head = wl->wl_head;
1090 tail = wl->wl_tail;
1091 delta = wl->wl_reclaimable_bytes;
1092
1093 /* If all of the entries are flushed, then be sure to keep
1094 * the reserved bytes reserved. Watch out for discarded transactions,
1095 * which could leave more bytes reserved than are reclaimable.
1096 */
1097 if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
1098 (delta >= wl->wl_reserved_bytes)) {
1099 delta -= wl->wl_reserved_bytes;
1100 }
1101 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
1102 &tail);
1103 KDASSERT(wl->wl_reserved_bytes <=
1104 wapbl_space_used(wl->wl_circ_size, head, tail));
1105 mutex_exit(&wl->wl_mtx);
1106
1107 if (error)
1108 return error;
1109
1110 if (waitonly)
1111 return 0;
1112
1113 /*
1114 * This is where head, tail and delta are unprotected
1115 * from races against itself or flush. This is ok since
1116 * we only call this routine from inside flush itself.
1117 *
1118 * XXX: how can it race against itself when accessed only
1119 * from behind the write-locked rwlock?
1120 */
1121 error = wapbl_write_commit(wl, head, tail);
1122 if (error)
1123 return error;
1124
1125 wl->wl_head = head;
1126 wl->wl_tail = tail;
1127
1128 mutex_enter(&wl->wl_mtx);
1129 KASSERT(wl->wl_reclaimable_bytes >= delta);
1130 wl->wl_reclaimable_bytes -= delta;
1131 mutex_exit(&wl->wl_mtx);
1132 WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1133 ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
1134 curproc->p_pid, curlwp->l_lid, delta));
1135
1136 return 0;
1137 }
1138
1139 /****************************************************************/
1140
1141 void
1142 wapbl_biodone(struct buf *bp)
1143 {
1144 struct wapbl_entry *we = bp->b_private;
1145 struct wapbl *wl = we->we_wapbl;
1146
1147 /*
1148 * Handle possible flushing of buffers after the log has been
1149 * decommissioned.
1150 */
1151 if (!wl) {
1152 KASSERT(we->we_bufcount > 0);
1153 we->we_bufcount--;
1154 #ifdef WAPBL_DEBUG_BUFBYTES
1155 KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
1156 we->we_unsynced_bufbytes -= bp->b_bufsize;
1157 #endif
1158
1159 if (we->we_bufcount == 0) {
1160 #ifdef WAPBL_DEBUG_BUFBYTES
1161 KASSERT(we->we_unsynced_bufbytes == 0);
1162 #endif
1163 wapbl_free(we);
1164 }
1165
1166 brelse(bp, 0);
1167 return;
1168 }
1169
1170 #ifdef ohbother
1171 KDASSERT(bp->b_flags & B_DONE);
1172 KDASSERT(!(bp->b_flags & B_DELWRI));
1173 KDASSERT(bp->b_flags & B_ASYNC);
1174 KDASSERT(bp->b_flags & B_BUSY);
1175 KDASSERT(!(bp->b_flags & B_LOCKED));
1176 KDASSERT(!(bp->b_flags & B_READ));
1177 KDASSERT(!(bp->b_flags & B_INVAL));
1178 KDASSERT(!(bp->b_flags & B_NOCACHE));
1179 #endif
1180
1181 if (bp->b_error) {
1182 #ifdef notyet /* Can't currently handle possible dirty buffer reuse */
1183 XXXpooka: interfaces not fully updated
1184 Note: this was not enabled in the original patch
1185 against netbsd4 either. I don't know if comment
1186 above is true or not.
1187
1188 /*
1189 * If an error occurs, report the error and leave the
1190 * buffer as a delayed write on the LRU queue.
1191 * restarting the write would likely result in
1192 * an error spinloop, so let it be done harmlessly
1193 * by the syncer.
1194 */
1195 bp->b_flags &= ~(B_DONE);
1196 simple_unlock(&bp->b_interlock);
1197
1198 if (we->we_error == 0) {
1199 mutex_enter(&wl->wl_mtx);
1200 wl->wl_error_count++;
1201 mutex_exit(&wl->wl_mtx);
1202 cv_broadcast(&wl->wl_reclaimable_cv);
1203 }
1204 we->we_error = bp->b_error;
1205 bp->b_error = 0;
1206 brelse(bp);
1207 return;
1208 #else
1209 /* For now, just mark the log permanently errored out */
1210
1211 mutex_enter(&wl->wl_mtx);
1212 if (wl->wl_error_count == 0) {
1213 wl->wl_error_count++;
1214 cv_broadcast(&wl->wl_reclaimable_cv);
1215 }
1216 mutex_exit(&wl->wl_mtx);
1217 #endif
1218 }
1219
1220 mutex_enter(&wl->wl_mtx);
1221
1222 KASSERT(we->we_bufcount > 0);
1223 we->we_bufcount--;
1224 #ifdef WAPBL_DEBUG_BUFBYTES
1225 KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
1226 we->we_unsynced_bufbytes -= bp->b_bufsize;
1227 KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
1228 wl->wl_unsynced_bufbytes -= bp->b_bufsize;
1229 #endif
1230
1231 /*
1232 * If the current transaction can be reclaimed, start
1233 * at the beginning and reclaim any consecutive reclaimable
1234 * transactions. If we successfully reclaim anything,
1235 * then wakeup anyone waiting for the reclaim.
1236 */
1237 if (we->we_bufcount == 0) {
1238 size_t delta = 0;
1239 int errcnt = 0;
1240 #ifdef WAPBL_DEBUG_BUFBYTES
1241 KDASSERT(we->we_unsynced_bufbytes == 0);
1242 #endif
1243 /*
1244 * Clear any posted error, since the buffer it came from
1245 * has successfully been flushed by now.
1246 */
1247 while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
1248 (we->we_bufcount == 0)) {
1249 delta += we->we_reclaimable_bytes;
1250 if (we->we_error)
1251 errcnt++;
1252 SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
1253 wapbl_free(we);
1254 }
1255
1256 if (delta) {
1257 wl->wl_reclaimable_bytes += delta;
1258 KASSERT(wl->wl_error_count >= errcnt);
1259 wl->wl_error_count -= errcnt;
1260 cv_broadcast(&wl->wl_reclaimable_cv);
1261 }
1262 }
1263
1264 mutex_exit(&wl->wl_mtx);
1265 brelse(bp, 0);
1266 }
1267
1268 /*
1269 * Write transactions to disk + start I/O for contents
1270 */
1271 int
1272 wapbl_flush(struct wapbl *wl, int waitfor)
1273 {
1274 struct buf *bp;
1275 struct wapbl_entry *we;
1276 off_t off;
1277 off_t head;
1278 off_t tail;
1279 size_t delta = 0;
1280 size_t flushsize;
1281 size_t reserved;
1282 int error = 0;
1283
1284 /*
1285 * Do a quick check to see if a full flush can be skipped
1286 * This assumes that the flush callback does not need to be called
1287 * unless there are other outstanding bufs.
1288 */
1289 if (!waitfor) {
1290 size_t nbufs;
1291 mutex_enter(&wl->wl_mtx); /* XXX need mutex here to
1292 protect the KASSERTS */
1293 nbufs = wl->wl_bufcount;
1294 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1295 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1296 mutex_exit(&wl->wl_mtx);
1297 if (nbufs == 0)
1298 return 0;
1299 }
1300
1301 /*
1302 * XXX we may consider using LK_UPGRADE here
1303 * if we want to call flush from inside a transaction
1304 */
1305 rw_enter(&wl->wl_rwlock, RW_WRITER);
1306 wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
1307 wl->wl_dealloccnt);
1308
1309 /*
1310 * Now that we are fully locked and flushed,
1311 * do another check for nothing to do.
1312 */
1313 if (wl->wl_bufcount == 0) {
1314 goto out;
1315 }
1316
1317 #if 0
1318 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1319 ("wapbl_flush thread %d.%d flushing entries with "
1320 "bufcount=%zu bufbytes=%zu\n",
1321 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1322 wl->wl_bufbytes));
1323 #endif
1324
1325 /* Calculate amount of space needed to flush */
1326 flushsize = wapbl_transaction_len(wl);
1327
1328 if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1329 /*
1330 * XXX this could be handled more gracefully, perhaps place
1331 * only a partial transaction in the log and allow the
1332 * remaining to flush without the protection of the journal.
1333 */
1334 panic("wapbl_flush: current transaction too big to flush\n");
1335 }
1336
1337 error = wapbl_truncate(wl, flushsize, 0);
1338 if (error)
1339 goto out2;
1340
1341 off = wl->wl_head;
1342 KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
1343 (off < wl->wl_circ_off + wl->wl_circ_size)));
1344 error = wapbl_write_blocks(wl, &off);
1345 if (error)
1346 goto out2;
1347 error = wapbl_write_revocations(wl, &off);
1348 if (error)
1349 goto out2;
1350 error = wapbl_write_inodes(wl, &off);
1351 if (error)
1352 goto out2;
1353
1354 reserved = 0;
1355 if (wl->wl_inohashcnt)
1356 reserved = wapbl_transaction_inodes_len(wl);
1357
1358 head = wl->wl_head;
1359 tail = wl->wl_tail;
1360
1361 wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1362 &head, &tail);
1363 #ifdef WAPBL_DEBUG
1364 if (head != off) {
1365 panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1366 " off=%"PRIdMAX" flush=%zu\n",
1367 (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1368 flushsize);
1369 }
1370 #else
1371 KASSERT(head == off);
1372 #endif
1373
1374 /* Opportunistically move the tail forward if we can */
1375 if (!wapbl_lazy_truncate) {
1376 mutex_enter(&wl->wl_mtx);
1377 delta = wl->wl_reclaimable_bytes;
1378 mutex_exit(&wl->wl_mtx);
1379 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1380 &head, &tail);
1381 }
1382
1383 error = wapbl_write_commit(wl, head, tail);
1384 if (error)
1385 goto out2;
1386
1387 /* poolme? or kmemme? */
1388 we = wapbl_calloc(1, sizeof(*we));
1389
1390 #ifdef WAPBL_DEBUG_BUFBYTES
1391 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1392 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1393 " unsynced=%zu"
1394 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1395 "inodes=%d\n",
1396 curproc->p_pid, curlwp->l_lid, flushsize, delta,
1397 wapbl_space_used(wl->wl_circ_size, head, tail),
1398 wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1399 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1400 wl->wl_inohashcnt));
1401 #else
1402 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1403 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1404 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1405 "inodes=%d\n",
1406 curproc->p_pid, curlwp->l_lid, flushsize, delta,
1407 wapbl_space_used(wl->wl_circ_size, head, tail),
1408 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1409 wl->wl_dealloccnt, wl->wl_inohashcnt));
1410 #endif
1411
1412
1413 mutex_enter(&bufcache_lock);
1414 mutex_enter(&wl->wl_mtx);
1415
1416 wl->wl_reserved_bytes = reserved;
1417 wl->wl_head = head;
1418 wl->wl_tail = tail;
1419 KASSERT(wl->wl_reclaimable_bytes >= delta);
1420 wl->wl_reclaimable_bytes -= delta;
1421 wl->wl_dealloccnt = 0;
1422 #ifdef WAPBL_DEBUG_BUFBYTES
1423 wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1424 #endif
1425
1426 we->we_wapbl = wl;
1427 we->we_bufcount = wl->wl_bufcount;
1428 #ifdef WAPBL_DEBUG_BUFBYTES
1429 we->we_unsynced_bufbytes = wl->wl_bufbytes;
1430 #endif
1431 we->we_reclaimable_bytes = flushsize;
1432 we->we_error = 0;
1433 SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1434
1435 /*
1436 * This flushes bufs in the reverse order from how they were queued.
1437 * It shouldn't matter, but if we care we could use a TAILQ instead.
1438 * XXX Note they will get put on the lru queue when they flush
1439 * so we might actually want to change this to preserve order.
1440 */
1441 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
1442 if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1443 continue;
1444 }
1445 bp->b_iodone = wapbl_biodone;
1446 bp->b_private = we;
1447 bremfree(bp);
1448 wapbl_remove_buf_locked(wl, bp);
1449 mutex_exit(&wl->wl_mtx);
1450 mutex_exit(&bufcache_lock);
1451 bawrite(bp);
1452 mutex_enter(&bufcache_lock);
1453 mutex_enter(&wl->wl_mtx);
1454 }
1455 mutex_exit(&wl->wl_mtx);
1456 mutex_exit(&bufcache_lock);
1457
1458 #if 0
1459 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1460 ("wapbl_flush thread %d.%d done flushing entries...\n",
1461 curproc->p_pid, curlwp->l_lid));
1462 #endif
1463
1464 out:
1465
1466 /*
1467 * If the waitfor flag is set, don't return until everything is
1468 * fully flushed and the on disk log is empty.
1469 */
1470 if (waitfor) {
1471 error = wapbl_truncate(wl, wl->wl_circ_size -
1472 wl->wl_reserved_bytes, wapbl_lazy_truncate);
1473 }
1474
1475 out2:
1476 if (error) {
1477 wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
1478 wl->wl_dealloclens, wl->wl_dealloccnt);
1479 }
1480
1481 #ifdef WAPBL_DEBUG_PRINT
1482 if (error) {
1483 pid_t pid = -1;
1484 lwpid_t lid = -1;
1485 if (curproc)
1486 pid = curproc->p_pid;
1487 if (curlwp)
1488 lid = curlwp->l_lid;
1489 mutex_enter(&wl->wl_mtx);
1490 #ifdef WAPBL_DEBUG_BUFBYTES
1491 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1492 ("wapbl_flush: thread %d.%d aborted flush: "
1493 "error = %d\n"
1494 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1495 "deallocs=%d inodes=%d\n"
1496 "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1497 "unsynced=%zu\n",
1498 pid, lid, error, wl->wl_bufcount,
1499 wl->wl_bufbytes, wl->wl_bcount,
1500 wl->wl_dealloccnt, wl->wl_inohashcnt,
1501 wl->wl_error_count, wl->wl_reclaimable_bytes,
1502 wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1503 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1504 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1505 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1506 "error = %d, unsynced = %zu\n",
1507 we->we_bufcount, we->we_reclaimable_bytes,
1508 we->we_error, we->we_unsynced_bufbytes));
1509 }
1510 #else
1511 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1512 ("wapbl_flush: thread %d.%d aborted flush: "
1513 "error = %d\n"
1514 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1515 "deallocs=%d inodes=%d\n"
1516 "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
1517 pid, lid, error, wl->wl_bufcount,
1518 wl->wl_bufbytes, wl->wl_bcount,
1519 wl->wl_dealloccnt, wl->wl_inohashcnt,
1520 wl->wl_error_count, wl->wl_reclaimable_bytes,
1521 wl->wl_reserved_bytes));
1522 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1523 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1524 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1525 "error = %d\n", we->we_bufcount,
1526 we->we_reclaimable_bytes, we->we_error));
1527 }
1528 #endif
1529 mutex_exit(&wl->wl_mtx);
1530 }
1531 #endif
1532
1533 rw_exit(&wl->wl_rwlock);
1534 return error;
1535 }
1536
1537 /****************************************************************/
1538
1539 void
1540 wapbl_jlock_assert(struct wapbl *wl)
1541 {
1542
1543 #ifdef WAPBL_DEBUG_SERIALIZE
1544 KASSERT(rw_write_held(&wl->wl_rwlock));
1545 #else
1546 KASSERT(rw_read_held(&wl->wl_rwlock) || rw_write_held(&wl->wl_rwlock));
1547 #endif
1548 }
1549
1550 void
1551 wapbl_junlock_assert(struct wapbl *wl)
1552 {
1553
1554 #ifdef WAPBL_DEBUG_SERIALIZE
1555 KASSERT(!rw_write_held(&wl->wl_rwlock));
1556 #endif
1557 }
1558
1559 /****************************************************************/
1560
1561 /* locks missing */
1562 void
1563 wapbl_print(struct wapbl *wl,
1564 int full,
1565 void (*pr)(const char *, ...))
1566 {
1567 struct buf *bp;
1568 struct wapbl_entry *we;
1569 (*pr)("wapbl %p", wl);
1570 (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
1571 wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
1572 (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
1573 wl->wl_circ_size, wl->wl_circ_off,
1574 (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
1575 (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
1576 wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
1577 #ifdef WAPBL_DEBUG_BUFBYTES
1578 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1579 "reserved = %zu errcnt = %d unsynced = %zu\n",
1580 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1581 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1582 wl->wl_error_count, wl->wl_unsynced_bufbytes);
1583 #else
1584 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1585 "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
1586 wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1587 wl->wl_error_count);
1588 #endif
1589 (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
1590 wl->wl_dealloccnt, wl->wl_dealloclim);
1591 (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
1592 wl->wl_inohashcnt, wl->wl_inohashmask);
1593 (*pr)("entries:\n");
1594 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1595 #ifdef WAPBL_DEBUG_BUFBYTES
1596 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
1597 "unsynced = %zu\n",
1598 we->we_bufcount, we->we_reclaimable_bytes,
1599 we->we_error, we->we_unsynced_bufbytes);
1600 #else
1601 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
1602 we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
1603 #endif
1604 }
1605 if (full) {
1606 int cnt = 0;
1607 (*pr)("bufs =");
1608 LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
1609 if (!LIST_NEXT(bp, b_wapbllist)) {
1610 (*pr)(" %p", bp);
1611 } else if ((++cnt % 6) == 0) {
1612 (*pr)(" %p,\n\t", bp);
1613 } else {
1614 (*pr)(" %p,", bp);
1615 }
1616 }
1617 (*pr)("\n");
1618
1619 (*pr)("dealloced blks = ");
1620 {
1621 int i;
1622 cnt = 0;
1623 for (i = 0; i < wl->wl_dealloccnt; i++) {
1624 (*pr)(" %"PRId64":%d,",
1625 wl->wl_deallocblks[i],
1626 wl->wl_dealloclens[i]);
1627 if ((++cnt % 4) == 0) {
1628 (*pr)("\n\t");
1629 }
1630 }
1631 }
1632 (*pr)("\n");
1633
1634 (*pr)("registered inodes = ");
1635 {
1636 int i;
1637 cnt = 0;
1638 for (i = 0; i <= wl->wl_inohashmask; i++) {
1639 struct wapbl_ino_head *wih;
1640 struct wapbl_ino *wi;
1641
1642 wih = &wl->wl_inohash[i];
1643 LIST_FOREACH(wi, wih, wi_hash) {
1644 if (wi->wi_ino == 0)
1645 continue;
1646 (*pr)(" %"PRId32"/0%06"PRIo32",",
1647 wi->wi_ino, wi->wi_mode);
1648 if ((++cnt % 4) == 0) {
1649 (*pr)("\n\t");
1650 }
1651 }
1652 }
1653 (*pr)("\n");
1654 }
1655 }
1656 }
1657
1658 #if defined(WAPBL_DEBUG) || defined(DDB)
1659 void
1660 wapbl_dump(struct wapbl *wl)
1661 {
1662 #if defined(WAPBL_DEBUG)
1663 if (!wl)
1664 wl = wapbl_debug_wl;
1665 #endif
1666 if (!wl)
1667 return;
1668 wapbl_print(wl, 1, printf);
1669 }
1670 #endif
1671
1672 /****************************************************************/
1673
1674 void
1675 wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
1676 {
1677
1678 wapbl_jlock_assert(wl);
1679
1680 /* XXX should eventually instead tie this into resource estimation */
1681 /* XXX this KASSERT needs locking/mutex analysis */
1682 KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
1683 wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
1684 wl->wl_dealloclens[wl->wl_dealloccnt] = len;
1685 wl->wl_dealloccnt++;
1686 WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
1687 ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
1688 }
1689
1690 /****************************************************************/
1691
1692 /*
1693 * Singleton pool init
1694 */
1695 static void
1696 wapbl_pool_init(int *refcnt, struct pool *pp, size_t size, const char *wchan)
1697 {
1698
1699 mutex_enter(&wapbl_global_mtx);
1700 if ((*refcnt)++ == 0)
1701 pool_init(pp, size, 0, 0, 0, wchan,
1702 &pool_allocator_nointr, IPL_NONE);
1703 mutex_exit(&wapbl_global_mtx);
1704 }
1705
1706 static void
1707 wapbl_pool_done(volatile int *refcnt, struct pool *pp)
1708 {
1709
1710 mutex_enter(&wapbl_global_mtx);
1711 if (--(*refcnt) == 0)
1712 pool_destroy(pp);
1713 mutex_exit(&wapbl_global_mtx);
1714 }
1715
1716 static void
1717 wapbl_inodetrk_init(struct wapbl *wl, u_int size)
1718 {
1719
1720 wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
1721 wapbl_pool_init(&wapbl_ino_pool_refcount, &wapbl_ino_pool,
1722 sizeof(struct wapbl_ino), "wapblinopl");
1723 }
1724
1725 static void
1726 wapbl_inodetrk_free(struct wapbl *wl)
1727 {
1728
1729 /* XXX this KASSERT needs locking/mutex analysis */
1730 KASSERT(wl->wl_inohashcnt == 0);
1731 hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
1732 wapbl_pool_done(&wapbl_ino_pool_refcount, &wapbl_ino_pool);
1733 }
1734
1735 static struct wapbl_ino *
1736 wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
1737 {
1738 struct wapbl_ino_head *wih;
1739 struct wapbl_ino *wi;
1740
1741 KASSERT(mutex_owned(&wl->wl_mtx));
1742
1743 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1744 LIST_FOREACH(wi, wih, wi_hash) {
1745 if (ino == wi->wi_ino)
1746 return wi;
1747 }
1748 return 0;
1749 }
1750
1751 void
1752 wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
1753 {
1754 struct wapbl_ino_head *wih;
1755 struct wapbl_ino *wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
1756
1757 mutex_enter(&wl->wl_mtx);
1758 if (wapbl_inodetrk_get(wl, ino)) {
1759 pool_put(&wapbl_ino_pool, wi);
1760 } else {
1761 wi->wi_ino = ino;
1762 wi->wi_mode = mode;
1763 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1764 LIST_INSERT_HEAD(wih, wi, wi_hash);
1765 wl->wl_inohashcnt++;
1766 WAPBL_PRINTF(WAPBL_PRINT_INODE,
1767 ("wapbl_register_inode: ino=%"PRId64"\n", ino));
1768 }
1769 mutex_exit(&wl->wl_mtx);
1770 }
1771
1772 void
1773 wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
1774 {
1775 struct wapbl_ino *wi;
1776
1777 mutex_enter(&wl->wl_mtx);
1778 wi = wapbl_inodetrk_get(wl, ino);
1779 if (wi) {
1780 WAPBL_PRINTF(WAPBL_PRINT_INODE,
1781 ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
1782 KASSERT(wl->wl_inohashcnt > 0);
1783 wl->wl_inohashcnt--;
1784 LIST_REMOVE(wi, wi_hash);
1785 mutex_exit(&wl->wl_mtx);
1786
1787 pool_put(&wapbl_ino_pool, wi);
1788 } else {
1789 mutex_exit(&wl->wl_mtx);
1790 }
1791 }
1792
1793 /****************************************************************/
1794
1795 static __inline size_t
1796 wapbl_transaction_inodes_len(struct wapbl *wl)
1797 {
1798 int blocklen = 1<<wl->wl_log_dev_bshift;
1799 int iph;
1800
1801 /* Calculate the number of inodes described in an inodelist header */
1802 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
1803 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
1804
1805 KASSERT(iph > 0);
1806
1807 return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
1808 }
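/*
 * Worked example (sizes assumed; the real ones come from
 * struct wapbl_wc_inodelist): with blocklen = 512, a 16-byte header
 * and 8-byte entries, iph = (512 - 16) / 8 = 62, so tracking 100
 * inodes takes howmany(100, 62) = 2 blocks, i.e. 1024 bytes.
 */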
1809
1810
1811 /* Calculate amount of space a transaction will take on disk */
1812 static size_t
1813 wapbl_transaction_len(struct wapbl *wl)
1814 {
1815 int blocklen = 1<<wl->wl_log_dev_bshift;
1816 size_t len;
1817 int bph;
1818
1819 /* Calculate number of blocks described in a blocklist header */
1820 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1821 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1822
1823 KASSERT(bph > 0);
1824
1825 len = wl->wl_bcount;
1826 len += howmany(wl->wl_bufcount, bph)*blocklen;
1827 len += howmany(wl->wl_dealloccnt, bph)*blocklen;
1828 len += wapbl_transaction_inodes_len(wl);
1829
1830 return len;
1831 }
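/*
 * Worked example (numbers assumed): with blocklen = 512 and bph = 62,
 * a transaction of 100 bufs totalling wl_bcount = 204800 bytes with
 * 10 pending deallocations needs 204800 bytes of data,
 * howmany(100, 62) = 2 blocklist blocks, 1 revocation block, plus
 * wapbl_transaction_inodes_len() for the registered inodes.
 */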
1832
1833 /*
1834 * Perform the commit operation.
1835 *
1836 * Note that incrementing the generation number needs to
1837 * be protected against racing with other invocations
1838 * of wapbl_write_commit. This is ok since this routine
1839 * is only invoked from wapbl_flush.
1840 */
1841 static int
1842 wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
1843 {
1844 struct wapbl_wc_header *wc = wl->wl_wc_header;
1845 struct timespec ts;
1846 int error;
1847 int force = 1;
1848
1849 /* XXX Calculate a checksum here; instead we sync the disk cache for now */
1850 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
1851 if (error) {
1852 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1853 ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
1854 "returned %d\n", wl->wl_devvp->v_rdev, error));
1855 }
1856
1857 wc->wc_head = head;
1858 wc->wc_tail = tail;
1859 wc->wc_checksum = 0;
1860 wc->wc_version = 1;
1861 getnanotime(&ts); /* XXX need higher resolution time here? */
1862 wc->wc_time = ts.tv_sec;
1863 wc->wc_timensec = ts.tv_nsec;
1864
1865 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1866 ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n",
1867 (intmax_t)head, (intmax_t)tail));
1868
1869 /*
1870 * XXX if generation will rollover, then first zero
1871 * over second commit header before trying to write both headers.
1872 */
1873
1874 error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
1875 wl->wl_logpbn + wc->wc_generation % 2);
1876 if (error)
1877 return error;
1878
1879 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
1880 if (error) {
1881 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1882 ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
1883 "returned %d\n", wl->wl_devvp->v_rdev, error));
1884 }
1885
1886 /*
1887 * If the generation number was zero, write it out a second time.
1888 * This handles initialization and generation number rollover
1889 */
1890 if (wc->wc_generation++ == 0) {
1891 error = wapbl_write_commit(wl, head, tail);
1892 /*
1893 * This panic should be able to be removed if we do the
1894 * zero'ing mentioned above, and we are certain to roll
1895 * back generation number on failure.
1896 */
1897 if (error)
1898 panic("wapbl_write_commit: error writing duplicate "
1899 "log header: %d\n", error);
1900 }
1901 return 0;
1902 }
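/*
 * Note (explanatory, added): the commit header for generation g is
 * written at wl_logpbn + (g % 2), so the two header blocks reserved
 * by wl_circ_off alternate; replay can then presumably pick whichever
 * header holds the newer valid generation.
 */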
1903
1904 /* Write the transaction's buffers to the log; returns the new offset via *offp */
1905 static int
1906 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
1907 {
1908 struct wapbl_wc_blocklist *wc =
1909 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1910 int blocklen = 1<<wl->wl_log_dev_bshift;
1911 int bph;
1912 struct buf *bp;
1913 off_t off = *offp;
1914 int error;
1915
1916 KASSERT(rw_write_held(&wl->wl_rwlock));
1917
1918 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1919 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1920
1921 bp = LIST_FIRST(&wl->wl_bufs);
1922
1923 while (bp) {
1924 int cnt;
1925 struct buf *obp = bp;
1926
1927 KASSERT(bp->b_flags & B_LOCKED);
1928
1929 wc->wc_type = WAPBL_WC_BLOCKS;
1930 wc->wc_len = blocklen;
1931 wc->wc_blkcount = 0;
1932 while (bp && (wc->wc_blkcount < bph)) {
1933 /*
1934 * Make sure all the physical block numbers are up to
1935 * date. If this is not always true on a given
1936 * filesystem, then VOP_BMAP must be called. We
1937 * could call VOP_BMAP here, or else in the filesystem
1938 * specific flush callback, although neither of those
1939 			 * solutions allows us to take the vnode lock. If a
1940 			 * filesystem requires that we take the vnode lock
1941 			 * to call VOP_BMAP, then we can probably do it in
1942 * bwrite when the vnode lock should already be held
1943 * by the invoking code.
1944 */
1945 KASSERT((bp->b_vp->v_type == VBLK) ||
1946 (bp->b_blkno != bp->b_lblkno));
1947 KASSERT(bp->b_blkno > 0);
1948
1949 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
1950 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
1951 wc->wc_len += bp->b_bcount;
1952 wc->wc_blkcount++;
1953 bp = LIST_NEXT(bp, b_wapbllist);
1954 }
1955 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1956 ("wapbl_write_blocks: len = %u off = %"PRIdMAX"\n",
1957 wc->wc_len, (intmax_t)off));
1958
1959 error = wapbl_circ_write(wl, wc, blocklen, &off);
1960 if (error)
1961 return error;
1962 bp = obp;
1963 cnt = 0;
1964 while (bp && (cnt++ < bph)) {
1965 error = wapbl_circ_write(wl, bp->b_data,
1966 bp->b_bcount, &off);
1967 if (error)
1968 return error;
1969 bp = LIST_NEXT(bp, b_wapbllist);
1970 }
1971 }
1972 *offp = off;
1973 return 0;
1974 }
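/*
 * Resulting on-disk layout for each iteration of the outer loop above
 * (illustrative): one log block of WAPBL_WC_BLOCKS header describing
 * up to bph buffers, immediately followed by those buffers' data:
 *
 *	[ blocklist header | b_data 0 | b_data 1 | ... ]
 *
 * wc_len counts the header block plus all payload bytes, which is
 * what lets replay skip a whole record with wapbl_circ_advance().
 */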
1975
1976 static int
1977 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
1978 {
1979 struct wapbl_wc_blocklist *wc =
1980 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1981 int i;
1982 int blocklen = 1<<wl->wl_log_dev_bshift;
1983 int bph;
1984 off_t off = *offp;
1985 int error;
1986
1987 if (wl->wl_dealloccnt == 0)
1988 return 0;
1989
1990 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1991 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1992
1993 i = 0;
1994 while (i < wl->wl_dealloccnt) {
1995 wc->wc_type = WAPBL_WC_REVOCATIONS;
1996 wc->wc_len = blocklen;
1997 wc->wc_blkcount = 0;
1998 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
1999 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2000 wl->wl_deallocblks[i];
2001 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2002 wl->wl_dealloclens[i];
2003 wc->wc_blkcount++;
2004 i++;
2005 }
2006 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2007 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2008 wc->wc_len, (intmax_t)off));
2009 error = wapbl_circ_write(wl, wc, blocklen, &off);
2010 if (error)
2011 return error;
2012 }
2013 *offp = off;
2014 return 0;
2015 }
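/*
 * Unlike wapbl_write_blocks(), a revocation record is metadata only:
 * wc_len stays at one log block since no buffer data follows the
 * header.  On replay each (wc_daddr, wc_dlen) pair simply removes
 * the matching entries from the block hash.
 */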
2016
2017 static int
2018 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2019 {
2020 struct wapbl_wc_inodelist *wc =
2021 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2022 int i;
2023 int blocklen = 1<<wl->wl_log_dev_bshift;
2024 off_t off = *offp;
2025 int error;
2026
2027 struct wapbl_ino_head *wih;
2028 struct wapbl_ino *wi;
2029 int iph;
2030
2031 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2032 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2033
2034 i = 0;
2035 wih = &wl->wl_inohash[0];
2036 wi = 0;
2037 do {
2038 wc->wc_type = WAPBL_WC_INODES;
2039 wc->wc_len = blocklen;
2040 wc->wc_inocnt = 0;
2041 wc->wc_clear = (i == 0);
2042 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2043 while (!wi) {
2044 KASSERT((wih - &wl->wl_inohash[0])
2045 <= wl->wl_inohashmask);
2046 wi = LIST_FIRST(wih++);
2047 }
2048 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2049 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2050 wc->wc_inocnt++;
2051 i++;
2052 wi = LIST_NEXT(wi, wi_hash);
2053 }
2054 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2055 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2056 wc->wc_len, (intmax_t)off));
2057 error = wapbl_circ_write(wl, wc, blocklen, &off);
2058 if (error)
2059 return error;
2060 } while (i < wl->wl_inohashcnt);
2061
2062 *offp = off;
2063 return 0;
2064 }
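/*
 * Note on wc_clear: the first record of each inode dump is written
 * with wc_clear set, which tells replay to discard inodes gathered
 * from any earlier dump and start over, so only the newest complete
 * dump in the log is ever used.
 */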
2065
2066 #endif /* _KERNEL */
2067
2068 /****************************************************************/
2069
2070 #ifdef _KERNEL
2071 static struct pool wapbl_blk_pool;
2072 static int wapbl_blk_pool_refcount;
2073 #endif
2074 struct wapbl_blk {
2075 LIST_ENTRY(wapbl_blk) wb_hash;
2076 daddr_t wb_blk;
2077 off_t wb_off; /* Offset of this block in the log */
2078 };
2079 #define WAPBL_BLKPOOL_MIN 83
2080
2081 static void
2082 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2083 {
2084 if (size < WAPBL_BLKPOOL_MIN)
2085 size = WAPBL_BLKPOOL_MIN;
2086 KASSERT(wr->wr_blkhash == 0);
2087 #ifdef _KERNEL
2088 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2089 wapbl_pool_init(&wapbl_blk_pool_refcount, &wapbl_blk_pool,
2090 sizeof(struct wapbl_blk), "wapblblkpl");
2091 #else /* ! _KERNEL */
2092 /* Manually implement hashinit */
2093 {
2094 		unsigned long i, hashsize;
2096 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2097 continue;
2098 wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
2099 		for (i = 0; i < hashsize; i++)
2100 LIST_INIT(&wr->wr_blkhash[i]);
2101 wr->wr_blkhashmask = hashsize - 1;
2102 }
2103 #endif /* ! _KERNEL */
2104 }
2105
2106 static void
2107 wapbl_blkhash_free(struct wapbl_replay *wr)
2108 {
2109 KASSERT(wr->wr_blkhashcnt == 0);
2110 #ifdef _KERNEL
2111 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2112 wapbl_pool_done(&wapbl_blk_pool_refcount, &wapbl_blk_pool);
2113 #else /* ! _KERNEL */
2114 wapbl_free(wr->wr_blkhash);
2115 #endif /* ! _KERNEL */
2116 }
2117
2118 static struct wapbl_blk *
2119 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2120 {
2121 struct wapbl_blk_head *wbh;
2122 struct wapbl_blk *wb;
2123 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2124 LIST_FOREACH(wb, wbh, wb_hash) {
2125 if (blk == wb->wb_blk)
2126 return wb;
2127 }
2128 return 0;
2129 }
2130
2131 static void
2132 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2133 {
2134 struct wapbl_blk_head *wbh;
2135 struct wapbl_blk *wb;
2136 wb = wapbl_blkhash_get(wr, blk);
2137 if (wb) {
2138 KASSERT(wb->wb_blk == blk);
2139 wb->wb_off = off;
2140 } else {
2141 #ifdef _KERNEL
2142 wb = pool_get(&wapbl_blk_pool, PR_WAITOK);
2143 #else /* ! _KERNEL */
2144 wb = wapbl_malloc(sizeof(*wb));
2145 #endif /* ! _KERNEL */
2146 wb->wb_blk = blk;
2147 wb->wb_off = off;
2148 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2149 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2150 wr->wr_blkhashcnt++;
2151 }
2152 }
2153
2154 static void
2155 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2156 {
2157 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2158 if (wb) {
2159 KASSERT(wr->wr_blkhashcnt > 0);
2160 wr->wr_blkhashcnt--;
2161 LIST_REMOVE(wb, wb_hash);
2162 #ifdef _KERNEL
2163 pool_put(&wapbl_blk_pool, wb);
2164 #else /* ! _KERNEL */
2165 wapbl_free(wb);
2166 #endif /* ! _KERNEL */
2167 }
2168 }
2169
2170 static void
2171 wapbl_blkhash_clear(struct wapbl_replay *wr)
2172 {
2173 int i;
2174 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2175 struct wapbl_blk *wb;
2176
2177 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2178 KASSERT(wr->wr_blkhashcnt > 0);
2179 wr->wr_blkhashcnt--;
2180 LIST_REMOVE(wb, wb_hash);
2181 #ifdef _KERNEL
2182 pool_put(&wapbl_blk_pool, wb);
2183 #else /* ! _KERNEL */
2184 wapbl_free(wb);
2185 #endif /* ! _KERNEL */
2186 }
2187 }
2188 KASSERT(wr->wr_blkhashcnt == 0);
2189 }
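/*
 * Typical block hash usage during replay (a minimal sketch with
 * hypothetical wr, nblocks, blkno, off1 and off2; not code from this
 * file).  Insertion overwrites any older offset, so the table always
 * points at the newest logged copy of a block:
 */
#if 0
	wapbl_blkhash_init(wr, nblocks);
	wapbl_blkhash_ins(wr, blkno, off1);	/* first copy logged */
	wapbl_blkhash_ins(wr, blkno, off2);	/* later copy wins */
	KASSERT(wapbl_blkhash_get(wr, blkno)->wb_off == off2);
	wapbl_blkhash_rem(wr, blkno);		/* revoked: forget it */
	wapbl_blkhash_clear(wr);		/* must empty before free */
	wapbl_blkhash_free(wr);
#endif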
2190
2191 /****************************************************************/
2192
2193 static int
2194 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2195 {
2196 size_t slen;
2197 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2198 off_t off = *offp;
2199 int error;
2200
2201 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2202 wc->wc_log_dev_bshift) == len);
2203 if (off < wc->wc_circ_off)
2204 off = wc->wc_circ_off;
2205 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2206 if (slen < len) {
2207 error = wapbl_read(data, slen, wr->wr_devvp,
2208 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2209 if (error)
2210 return error;
2211 data = (uint8_t *)data + slen;
2212 len -= slen;
2213 off = wc->wc_circ_off;
2214 }
2215 error = wapbl_read(data, len, wr->wr_devvp,
2216 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2217 if (error)
2218 return error;
2219 off += len;
2220 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2221 off = wc->wc_circ_off;
2222 *offp = off;
2223 return 0;
2224 }
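/*
 * Wrap-around example (assumed geometry): with wc_circ_off = 1024
 * and wc_circ_size = 8192, a 1024-byte read starting at off = 8704
 * has only slen = 1024 + 8192 - 8704 = 512 bytes left before the end
 * of the circular area, so it is split into a 512-byte read at 8704
 * followed by a 512-byte read at 1024, leaving *offp = 1536.
 */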
2225
2226 static void
2227 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2228 {
2229 size_t slen;
2230 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2231 off_t off = *offp;
2232
2233 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2234 wc->wc_log_dev_bshift) == len);
2235
2236 if (off < wc->wc_circ_off)
2237 off = wc->wc_circ_off;
2238 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2239 if (slen < len) {
2240 len -= slen;
2241 off = wc->wc_circ_off;
2242 }
2243 off += len;
2244 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2245 off = wc->wc_circ_off;
2246 *offp = off;
2247 }
2248
2249 /****************************************************************/
2250
2251 int
2252 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2253 daddr_t off, size_t count, size_t blksize)
2254 {
2255 struct wapbl_replay *wr;
2256 int error;
2257 struct vnode *devvp;
2258 daddr_t logpbn;
2259 uint8_t *scratch;
2260 struct wapbl_wc_header *wch;
2261 struct wapbl_wc_header *wch2;
2262 /* Use this until we read the actual log header */
2263 int log_dev_bshift = DEV_BSHIFT;
2264 size_t used;
2265
2266 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2267 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2268 vp, off, count, blksize));
2269
2270 if (off < 0)
2271 return EINVAL;
2272
2273 if (blksize < DEV_BSIZE)
2274 return EINVAL;
2275 if (blksize % DEV_BSIZE)
2276 return EINVAL;
2277
2278 #ifdef _KERNEL
2279 #if 0
2280 /* XXX vp->v_size isn't reliably set for VBLK devices,
2281 * especially root. However, we might still want to verify
2282 * that the full load is readable */
2283 if ((off + count) * blksize > vp->v_size)
2284 return EINVAL;
2285 #endif
2286
2287 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2288 return error;
2289 }
2290 #else /* ! _KERNEL */
2291 devvp = vp;
2292 logpbn = off;
2293 #endif /* ! _KERNEL */
2294
2295 scratch = wapbl_malloc(MAXBSIZE);
2296
2297 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
2298 if (error)
2299 goto errout;
2300
2301 wch = (struct wapbl_wc_header *)scratch;
2302 wch2 =
2303 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2304 /* XXX verify checksums and magic numbers */
2305 if (wch->wc_type != WAPBL_WC_HEADER) {
2306 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2307 error = EFTYPE;
2308 goto errout;
2309 }
2310
2311 if (wch2->wc_generation > wch->wc_generation)
2312 wch = wch2;
2313
2314 wr = wapbl_calloc(1, sizeof(*wr));
2315
2316 wr->wr_logvp = vp;
2317 wr->wr_devvp = devvp;
2318 wr->wr_logpbn = logpbn;
2319
2320 wr->wr_scratch = scratch;
2321
2322 memcpy(&wr->wr_wc_header, wch, sizeof(wr->wr_wc_header));
2323
2324 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2325
2326 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2327 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2328 " len=%"PRId64" used=%zu\n",
2329 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2330 wch->wc_circ_size, used));
2331
2332 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2333 error = wapbl_replay_prescan(wr);
2334 if (error) {
2335 wapbl_replay_stop(wr);
2336 wapbl_replay_free(wr);
2337 return error;
2338 }
2339
2340 error = wapbl_replay_get_inodes(wr);
2341 if (error) {
2342 wapbl_replay_stop(wr);
2343 wapbl_replay_free(wr);
2344 return error;
2345 }
2346
2347 *wrp = wr;
2348 return 0;
2349
2350 errout:
2351 wapbl_free(scratch);
2352 return error;
2353 }
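/*
 * The two header slots read above alternate by generation (see
 * wapbl_write_commit()), so picking the header with the larger
 * wc_generation selects the most recently completed commit.  Note
 * the plain '>' comparison assumes the generation counter has not
 * wrapped; rollover is still flagged as an XXX in the writer.
 */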
2354
2355 void
2356 wapbl_replay_stop(struct wapbl_replay *wr)
2357 {
2358
2359 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2360
2361 KDASSERT(wapbl_replay_isopen(wr));
2362
2363 wapbl_free(wr->wr_scratch);
2364 wr->wr_scratch = 0;
2365
2366 wr->wr_logvp = 0;
2367
2368 wapbl_blkhash_clear(wr);
2369 wapbl_blkhash_free(wr);
2370 }
2371
2372 void
2373 wapbl_replay_free(struct wapbl_replay *wr)
2374 {
2375
2376 KDASSERT(!wapbl_replay_isopen(wr));
2377
2378 if (wr->wr_inodes)
2379 wapbl_free(wr->wr_inodes);
2380 wapbl_free(wr);
2381 }
2382
2383 int
2384 wapbl_replay_isopen1(struct wapbl_replay *wr)
2385 {
2386
2387 return wapbl_replay_isopen(wr);
2388 }
2389
2390 static int
2391 wapbl_replay_prescan(struct wapbl_replay *wr)
2392 {
2393 off_t off;
2394 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2395 int error;
2396
2397 int logblklen = 1<<wch->wc_log_dev_bshift;
2398 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2399
2400 wapbl_blkhash_clear(wr);
2401
2402 off = wch->wc_tail;
2403 while (off != wch->wc_head) {
2404 struct wapbl_wc_null *wcn;
2405 off_t saveoff = off;
2406 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2407 if (error)
2408 goto errout;
2409 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2410 switch (wcn->wc_type) {
2411 case WAPBL_WC_BLOCKS:
2412 {
2413 struct wapbl_wc_blocklist *wc =
2414 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2415 int i;
2416 for (i = 0; i < wc->wc_blkcount; i++) {
2417 int j, n;
2418 /*
2419 * Enter each physical block into the
2420 * hashtable independently
2421 */
2422 n = wc->wc_blocks[i].wc_dlen >>
2423 wch->wc_fs_dev_bshift;
2424 for (j = 0; j < n; j++) {
2425 wapbl_blkhash_ins(wr,
2426 wc->wc_blocks[i].wc_daddr + j,
2427 off);
2428 wapbl_circ_advance(wr,
2429 fsblklen, &off);
2430 }
2431 }
2432 }
2433 break;
2434
2435 case WAPBL_WC_REVOCATIONS:
2436 {
2437 struct wapbl_wc_blocklist *wc =
2438 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2439 int i;
2440 for (i = 0; i < wc->wc_blkcount; i++) {
2441 int j, n;
2442 /*
2443 * Remove any blocks found from the
2444 * hashtable
2445 */
2446 n = wc->wc_blocks[i].wc_dlen >>
2447 wch->wc_fs_dev_bshift;
2448 for (j = 0; j < n; j++) {
2449 wapbl_blkhash_rem(wr,
2450 wc->wc_blocks[i].wc_daddr + j);
2451 }
2452 }
2453 }
2454 break;
2455
2456 case WAPBL_WC_INODES:
2457 {
2458 struct wapbl_wc_inodelist *wc =
2459 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2460 /*
2461 * Keep track of where we found this so we
2462 * can use it later
2463 */
2464 if (wc->wc_clear) {
2465 wr->wr_inodestail = saveoff;
2466 wr->wr_inodescnt = 0;
2467 }
2468 if (wr->wr_inodestail)
2469 wr->wr_inodeshead = off;
2470 wr->wr_inodescnt += wc->wc_inocnt;
2471 }
2472 break;
2473 default:
2474 printf("Unrecognized wapbl type: 0x%08x\n",
2475 wcn->wc_type);
2476 error = EFTYPE;
2477 goto errout;
2478 }
2479 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2480 if (off != saveoff) {
2481 printf("wapbl_replay: corrupted records\n");
2482 error = EFTYPE;
2483 goto errout;
2484 }
2485 }
2486 return 0;
2487
2488 errout:
2489 wapbl_blkhash_clear(wr);
2490 return error;
2491 }
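/*
 * After a successful prescan the block hash fully indexes the
 * journal: each entry maps a filesystem block to the log offset of
 * its newest logged copy, revoked blocks have been dropped, and
 * wr_inodestail/wr_inodeshead bracket the most recent inode dump for
 * wapbl_replay_get_inodes() to re-read.
 */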
2492
2493 static int
2494 wapbl_replay_get_inodes(struct wapbl_replay *wr)
2495 {
2496 off_t off;
2497 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2498 int logblklen = 1<<wch->wc_log_dev_bshift;
2499 	int cnt = 0;
2500
2501 KDASSERT(wapbl_replay_isopen(wr));
2502
2503 if (wr->wr_inodescnt == 0)
2504 return 0;
2505
2506 KASSERT(!wr->wr_inodes);
2507
2508 wr->wr_inodes = wapbl_malloc(wr->wr_inodescnt*sizeof(wr->wr_inodes[0]));
2509
2510 off = wr->wr_inodestail;
2511
2512 while (off != wr->wr_inodeshead) {
2513 struct wapbl_wc_null *wcn;
2514 int error;
2515 off_t saveoff = off;
2516 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2517 if (error) {
2518 wapbl_free(wr->wr_inodes);
2519 wr->wr_inodes = 0;
2520 return error;
2521 }
2522 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2523 switch (wcn->wc_type) {
2524 case WAPBL_WC_BLOCKS:
2525 case WAPBL_WC_REVOCATIONS:
2526 break;
2527 case WAPBL_WC_INODES:
2528 {
2529 struct wapbl_wc_inodelist *wc =
2530 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2531 /*
2532 * Keep track of where we found this so we
2533 * can use it later
2534 */
2535 if (wc->wc_clear) {
2536 cnt = 0;
2537 }
2538 /* This memcpy assumes that wr_inodes is
2539 * laid out the same as wc_inodes. */
2540 memcpy(&wr->wr_inodes[cnt], wc->wc_inodes,
2541 wc->wc_inocnt*sizeof(wc->wc_inodes[0]));
2542 cnt += wc->wc_inocnt;
2543 }
2544 break;
2545 default:
2546 KASSERT(0);
2547 }
2548 off = saveoff;
2549 wapbl_circ_advance(wr, wcn->wc_len, &off);
2550 }
2551 KASSERT(cnt == wr->wr_inodescnt);
2552 return 0;
2553 }
2554
2555 #ifdef DEBUG
2556 int
2557 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2558 {
2559 off_t off;
2560 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2561 int mismatchcnt = 0;
2562 int logblklen = 1<<wch->wc_log_dev_bshift;
2563 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2564 void *scratch1 = wapbl_malloc(MAXBSIZE);
2565 void *scratch2 = wapbl_malloc(MAXBSIZE);
2566 int error = 0;
2567
2568 KDASSERT(wapbl_replay_isopen(wr));
2569
2570 off = wch->wc_tail;
2571 while (off != wch->wc_head) {
2572 struct wapbl_wc_null *wcn;
2573 #ifdef DEBUG
2574 off_t saveoff = off;
2575 #endif
2576 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2577 if (error)
2578 goto out;
2579 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2580 switch (wcn->wc_type) {
2581 case WAPBL_WC_BLOCKS:
2582 {
2583 struct wapbl_wc_blocklist *wc =
2584 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2585 int i;
2586 for (i = 0; i < wc->wc_blkcount; i++) {
2587 int foundcnt = 0;
2588 int dirtycnt = 0;
2589 int j, n;
2590 /*
2591 				 * Check each physical block against the
2592 				 * hashtable independently
2593 */
2594 n = wc->wc_blocks[i].wc_dlen >>
2595 wch->wc_fs_dev_bshift;
2596 for (j = 0; j < n; j++) {
2597 struct wapbl_blk *wb =
2598 wapbl_blkhash_get(wr,
2599 wc->wc_blocks[i].wc_daddr + j);
2600 if (wb && (wb->wb_off == off)) {
2601 foundcnt++;
2602 error =
2603 wapbl_circ_read(wr,
2604 scratch1, fsblklen,
2605 &off);
2606 if (error)
2607 goto out;
2608 error =
2609 wapbl_read(scratch2,
2610 fsblklen, fsdevvp,
2611 wb->wb_blk);
2612 if (error)
2613 goto out;
2614 if (memcmp(scratch1,
2615 scratch2,
2616 fsblklen)) {
2617 printf(
2618 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2619 wb->wb_blk, (intmax_t)off);
2620 dirtycnt++;
2621 mismatchcnt++;
2622 }
2623 } else {
2624 wapbl_circ_advance(wr,
2625 fsblklen, &off);
2626 }
2627 }
2628 #if 0
2629 /*
2630 * If all of the blocks in an entry
2631 * are clean, then remove all of its
2632 * blocks from the hashtable since they
2633 * never will need replay.
2634 */
2635 if ((foundcnt != 0) &&
2636 (dirtycnt == 0)) {
2637 off = saveoff;
2638 wapbl_circ_advance(wr,
2639 logblklen, &off);
2640 for (j = 0; j < n; j++) {
2641 struct wapbl_blk *wb =
2642 wapbl_blkhash_get(wr,
2643 wc->wc_blocks[i].wc_daddr + j);
2644 if (wb &&
2645 (wb->wb_off == off)) {
2646 wapbl_blkhash_rem(wr, wb->wb_blk);
2647 }
2648 wapbl_circ_advance(wr,
2649 fsblklen, &off);
2650 }
2651 }
2652 #endif
2653 }
2654 }
2655 break;
2656 case WAPBL_WC_REVOCATIONS:
2657 case WAPBL_WC_INODES:
2658 break;
2659 default:
2660 KASSERT(0);
2661 }
2662 #ifdef DEBUG
2663 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2664 KASSERT(off == saveoff);
2665 #endif
2666 }
2667 out:
2668 wapbl_free(scratch1);
2669 wapbl_free(scratch2);
2670 if (!error && mismatchcnt)
2671 error = EFTYPE;
2672 return error;
2673 }
2674 #endif
2675
2676 int
2677 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2678 {
2679 off_t off;
2680 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2681 int logblklen = 1<<wch->wc_log_dev_bshift;
2682 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2683 void *scratch1 = wapbl_malloc(MAXBSIZE);
2684 int error = 0;
2685
2686 KDASSERT(wapbl_replay_isopen(wr));
2687
2688 /*
2689 * This parses the journal for replay, although it could
2690 	 * just as easily walk the hashtable instead (sketch below).
2691 */
2692
2693 off = wch->wc_tail;
2694 while (off != wch->wc_head) {
2695 struct wapbl_wc_null *wcn;
2696 #ifdef DEBUG
2697 off_t saveoff = off;
2698 #endif
2699 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2700 if (error)
2701 goto out;
2702 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2703 switch (wcn->wc_type) {
2704 case WAPBL_WC_BLOCKS:
2705 {
2706 struct wapbl_wc_blocklist *wc =
2707 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2708 int i;
2709 for (i = 0; i < wc->wc_blkcount; i++) {
2710 int j, n;
2711 /*
2712 * Check each physical block against
2713 * the hashtable independently
2714 */
2715 n = wc->wc_blocks[i].wc_dlen >>
2716 wch->wc_fs_dev_bshift;
2717 for (j = 0; j < n; j++) {
2718 struct wapbl_blk *wb =
2719 wapbl_blkhash_get(wr,
2720 wc->wc_blocks[i].wc_daddr + j);
2721 if (wb && (wb->wb_off == off)) {
2722 error = wapbl_circ_read(
2723 wr, scratch1,
2724 fsblklen, &off);
2725 if (error)
2726 goto out;
2727 error =
2728 wapbl_write(scratch1,
2729 fsblklen, fsdevvp,
2730 wb->wb_blk);
2731 if (error)
2732 goto out;
2733 } else {
2734 wapbl_circ_advance(wr,
2735 fsblklen, &off);
2736 }
2737 }
2738 }
2739 }
2740 break;
2741 case WAPBL_WC_REVOCATIONS:
2742 case WAPBL_WC_INODES:
2743 break;
2744 default:
2745 KASSERT(0);
2746 }
2747 #ifdef DEBUG
2748 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2749 KASSERT(off == saveoff);
2750 #endif
2751 }
2752 out:
2753 wapbl_free(scratch1);
2754 return error;
2755 }
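/*
 * Hashtable walk alluded to in the comment above (a minimal sketch as
 * it might read inside wapbl_replay_write(); it uses that function's
 * locals and is not code from this file).  Every live entry already
 * records the newest log offset for its block, so the journal does
 * not strictly need to be re-parsed:
 */
#if 0
	int i;
	for (i = 0; i <= wr->wr_blkhashmask; i++) {
		struct wapbl_blk *wb;
		LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
			off_t off = wb->wb_off;
			error = wapbl_circ_read(wr, scratch1, fsblklen, &off);
			if (error)
				goto out;
			error = wapbl_write(scratch1, fsblklen, fsdevvp,
			    wb->wb_blk);
			if (error)
				goto out;
		}
	}
#endif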
2756
2757 int
2758 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2759 {
2760 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2761 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2762
2763 KDASSERT(wapbl_replay_isopen(wr));
2764
2765 KASSERT((len % fsblklen) == 0);
2766
2767 while (len != 0) {
2768 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2769 if (wb) {
2770 off_t off = wb->wb_off;
2771 int error;
2772 error = wapbl_circ_read(wr, data, fsblklen, &off);
2773 if (error)
2774 return error;
2775 }
2776 data = (uint8_t *)data + fsblklen;
2777 len -= fsblklen;
2778 blk++;
2779 }
2780 return 0;
2781 }
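/*
 * Usage note (illustrative): while replay is open, a filesystem can
 * overlay freshly read metadata with any newer copies still sitting
 * in the journal, along the lines of
 *
 *	error = bread(...);	(read the on-disk copy first)
 *	error = wapbl_replay_read(wr, data, blk, fsblklen);
 *
 * Blocks without a hash entry are left untouched, so the buffer must
 * already hold the on-disk contents.
 */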
2782