/*	$NetBSD: vfs_wapbl.c,v 1.3.8.3 2010/11/22 02:52:29 riz Exp $	*/

/*-
 * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements file system independent write-ahead filesystem logging.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.3.8.3 2010/11/22 02:52:29 riz Exp $");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/wapbl.h>

#if WAPBL_UVM_ALLOC
#include <uvm/uvm.h>
#endif

#include <miscfs/specfs/specdev.h>

MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
#define	wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
#define	wapbl_free(a) free((a), M_WAPBL)
#define	wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO)

#else /* !_KERNEL */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <sys/time.h>
#include <sys/wapbl.h>

#define	KDASSERT(x) assert(x)
#define	KASSERT(x) assert(x)
#define	wapbl_malloc(s) malloc(s)
#define	wapbl_free(a) free(a)
#define	wapbl_calloc(n, s) calloc((n), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		lm = rwlock held writing or mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
struct wapbl {
	struct vnode *wl_logvp;	/* r:	log here */
	struct vnode *wl_devvp;	/* r:	log on this device */
	struct mount *wl_mount;	/* r:	mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r:	Physical block number of start of log */
	int wl_log_dev_bshift;	/* r:	logarithm of device block size of log
					device */
	int wl_fs_dev_bshift;	/* r:	logarithm of device block size of
					filesystem device */

	unsigned wl_lock_count;	/* m:	Count of transactions in progress */

	size_t wl_circ_size;	/* r:	Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r:	Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r:	Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r:	Number of buf bytes reserved for log */

	off_t wl_head;		/* l:	Byte offset of log head */
	off_t wl_tail;		/* l:	Byte offset of log tail */
	/*
	 * head == tail == 0 means log is empty
	 * head == tail != 0 means log is full
	 * see assertions in wapbl_advance() for other boundary conditions.
	 * Only truncate moves the tail, except when flush sets it to
	 * wl_header_size.  Only flush moves the head, except when truncate
	 * sets it to 0.
	 */
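	/*
	 * Illustrative sketch (the numbers here are assumptions made up
	 * for the example, not taken from the code): with
	 * wl_circ_off = 1024 and wl_circ_size = 8192, an empty log has
	 * head == tail == 0.  Flushing a 2048-byte transaction leaves
	 * tail = 1024 and head = 1024 + 2048 = 3072; once truncate
	 * reclaims it, head and tail return to 0.  A full log would
	 * instead have head == tail != 0.
	 */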

	struct wapbl_wc_header *wl_wc_header;	/* l	*/
	void *wl_wc_scratch;	/* l:	scratch space (XXX: why?) */

	kmutex_t wl_mtx;	/* u:	short-term lock */
	krwlock_t wl_rwlock;	/* u:	File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
	wapbl_flush_fn_t wl_flush;	/* r	*/
	wapbl_flush_fn_t wl_flush_abort;/* r	*/

	size_t wl_bufbytes;	/* m:	Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m:	Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m:	Total bcount of wl_bufs */

	LIST_HEAD(, buf) wl_bufs; /* m:	Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes;	/* m:	Amount of space available for
						reclamation by truncate */
	int wl_error_count;	/* m:	# of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
#endif

	daddr_t *wl_deallocblks;/* lm:	address of block */
	int *wl_dealloclens;	/* lm:	size of block */
	int wl_dealloccnt;	/* lm:	total count */
	int wl_dealloclim;	/* l:	max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
						   accounting */
};

#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
#endif /* _KERNEL */

static int wapbl_replay_prescan(struct wapbl_replay *wr);
static int wapbl_replay_get_inodes(struct wapbl_replay *wr);

static __inline size_t wapbl_space_free(size_t avail, off_t head,
	off_t tail);
static __inline size_t wapbl_space_used(size_t avail, off_t head,
	off_t tail);

#ifdef _KERNEL

#define	WAPBL_INODETRK_SIZE 83
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

/*
 * This is useful for debugging.  If set, the log will
 * only be truncated when necessary.
 */
int wapbl_lazy_truncate = 0;

struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};

void
wapbl_init(void)
{

	malloc_type_attach(M_WAPBL);
}

int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = DEV_BSHIFT;
	int fs_dev_bshift = DEV_BSHIFT;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
			("wapbl: log device's block size cannot be larger "
			 "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return ENOSPC;
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	LIST_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of log_dev_bshift */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;

	/*
	 * wl_bufbytes_max limits the size of the in memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
	 *   it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
	 * Therefore it must be a multiple of the least common multiple of
	 * those three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
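
	/*
	 * Worked example of the rounding above (values are assumptions
	 * for illustration, not requirements): with PAGE_SHIFT = 12 and
	 * both bshifts = 9, each shift-down/shift-up pair rounds
	 * wl_bufbytes_max down to a multiple of the corresponding unit,
	 * so the net effect is rounding down to a multiple of
	 * max(4096, 512, 512) = 4096; e.g. 1050000 becomes 1048576.
	 */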

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (nbuf / 2) * 1024;

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);

#if WAPBL_UVM_ALLOC
	wl->wl_deallocblks = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	KASSERT(wl->wl_deallocblks != NULL);
	wl->wl_dealloclens = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
	KASSERT(wl->wl_dealloclens != NULL);
#else
	wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
	    wl->wl_dealloclim);
	wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
	    wl->wl_dealloclim);
#endif

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1<<wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_malloc(len);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		int i;

		WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
		    ("wapbl_start: reusing log with %d inodes\n",
		    wr->wr_inodescnt));

		/*
		 * It's only valid to reuse the replay log if it's
		 * the same as the new log we just opened.
		 */
		KDASSERT(!wapbl_replay_isopen(wr));
		KASSERT(devvp->v_rdev == wr->wr_devvp->v_rdev);
		KASSERT(logpbn == wr->wr_logpbn);
		KASSERT(wl->wl_circ_size == wr->wr_wc_header.wc_circ_size);
		KASSERT(wl->wl_circ_off == wr->wr_wc_header.wc_circ_off);
		KASSERT(wl->wl_log_dev_bshift ==
		    wr->wr_wc_header.wc_log_dev_bshift);
		KASSERT(wl->wl_fs_dev_bshift ==
		    wr->wr_wc_header.wc_fs_dev_bshift);

		wl->wl_wc_header->wc_generation =
		    wr->wr_wc_header.wc_generation + 1;

		for (i = 0; i < wr->wr_inodescnt; i++)
			wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
			    wr->wr_inodes[i].wr_imode);

		/* Make sure new transaction won't overwrite old inodes list */
		KDASSERT(wapbl_transaction_len(wl) <=
		    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
		    wr->wr_inodestail));

		wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
		wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
		    wapbl_transaction_len(wl);

		error = wapbl_write_inodes(wl, &wl->wl_head);
		if (error)
			goto errout;

		KASSERT(wl->wl_head != wl->wl_tail);
		KASSERT(wl->wl_head != 0);
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) *
		wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) *
		wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);
	wapbl_free(wl);

	return error;
}

/*
 * Like wapbl_flush, only discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

#ifdef WAPBL_DEBUG_PRINT
	{
		struct wapbl_entry *we;
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}
	}

	/* Discard list of deallocs */
	wl->wl_dealloccnt = 0;
	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	struct vnode *vp;
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return EBUSY;
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	vp = wl->wl_logvp;

	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) *
		wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) *
		wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl);

	return 0;
}

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
	struct buf *bp;
	int error;

	KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
	KASSERT(devvp->v_type == VBLK);

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(&devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(&devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY; /* silly & dubious */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%x failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}

/*
 * Write len bytes of data into the circular log at byte offset *offp;
 * on success, *offp is updated to the offset for the next write.
 * Handles log wraparound.
 */
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;

	KDASSERT(((len >> wl->wl_log_dev_bshift) <<
	    wl->wl_log_dev_bshift) == len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		error = wapbl_write(data, slen, wl->wl_devvp,
		    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	error = wapbl_write(data, len, wl->wl_devvp,
	    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}
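
/*
 * Worked example of the wraparound above (geometry is assumed for the
 * sake of illustration): with wl_circ_off = 1024, wl_circ_size = 8192
 * and wl_log_dev_bshift = 9, a call with off = 8704 and len = 1024
 * computes slen = 1024 + 8192 - 8704 = 512 < len.  It therefore writes
 * 512 bytes at device block wl_logpbn + (8704 >> 9), wraps to
 * off = 1024, writes the remaining 512 bytes there, and returns with
 * *offp = 1536.
 */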

/****************************************************************/

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;

	KDASSERT(wl);

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
		   wl->wl_bufbytes_max / 2) ||
		  ((wl->wl_bufcount + (lockcount * 10)) >
		   wl->wl_bufcount_max / 2) ||
		  (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
	mutex_exit(&wl->wl_mtx);

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, RW_READER);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}
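
#if 0
/*
 * Hedged usage sketch (a hypothetical caller, not part of this file):
 * a filesystem brackets a metadata operation with wapbl_begin() and
 * wapbl_end(); the buffers dirtied in between join the current
 * transaction via wapbl_add_buf() from the buffer cache code rather
 * than being written in place.
 */
static int
example_metadata_op(struct wapbl *wl)
{
	int error;

	error = wapbl_begin(wl, __FILE__, __LINE__);
	if (error)
		return error;
	/* ... dirty metadata buffers here; they are logged, not written ... */
	wapbl_end(wl);
	return 0;
}
#endif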

void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		LIST_REMOVE(bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked but dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	    ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	LIST_REMOVE(bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */

/****************************************************************/
/* Some utility inlines */

/*
 * Advance the circular-log pointer old by delta bytes, wrapping within
 * the region [off, off + size); returns the new pointer value.
 */
static __inline off_t
wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
{
	off_t new;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= size);
	KASSERT((old == 0) || (old >= off));
	KASSERT(old < (size + off));

	if ((old == 0) && (delta != 0))
		new = off + delta;
	else if ((old + delta) < (size + off))
		new = old + delta;
	else
		new = (old + delta) - size;

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (new == old));
	KASSERT((delta == 0) || (new != 0));
	KASSERT((delta != (size)) || (new == old));

	/* Define acceptable ranges for output. */
	KASSERT((new == 0) || (new >= off));
	KASSERT(new < (size + off));
	return new;
}
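
/*
 * Worked example (numbers assumed for illustration only): with
 * size = 8192 and off = 1024, advancing old = 8704 by delta = 1024
 * gives 9728, which is >= size + off = 9216, so the result wraps to
 * 9728 - 8192 = 1536.  Advancing from the empty state (old == 0) by a
 * nonzero delta starts from off, yielding 1024 + delta.
 */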

static __inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail == 0) {
		KASSERT(head == 0);
		return 0;
	}
	return ((head + (avail - 1) - tail) % avail) + 1;
}

static __inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{

	return avail - wapbl_space_used(avail, head, tail);
}
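
/*
 * Sanity-check examples (illustrative values): with avail = 8192,
 * head == tail == 0 (empty log) gives used = 0 and free = 8192, while
 * head == tail != 0 (full log) gives ((head + 8191 - head) % 8192) + 1
 * = 8192 used and 0 free, matching the head/tail conventions noted in
 * struct wapbl.
 */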

static __inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_free(size, head, tail));
	head = wapbl_advance(size, off, head, delta);
	if ((tail == 0) && (head != 0))
		tail = off;
	*headp = head;
	*tailp = tail;
}

static __inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_used(size, head, tail));
	tail = wapbl_advance(size, off, tail, delta);
	if (head == tail) {
		head = tail = 0;
	}
	*headp = head;
	*tailp = tail;
}

#ifdef _KERNEL

/****************************************************************/

/*
 * Remove transactions whose buffers are completely flushed to disk.
 * Will block until at least minfree space is available.
 * Only intended to be called from inside wapbl_flush and therefore
 * does not protect against commit races with itself or with flush.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
		wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	if (waitonly)
		return 0;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush.  This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}

/****************************************************************/

void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;

	/*
	 * Handle possible flushing of buffers after log has been
	 * decommissioned.
	 */
	if (!wl) {
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
		we->we_unsynced_bufbytes -= bp->b_bufsize;
#endif

		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_flags & B_DONE);
	KDASSERT(!(bp->b_flags & B_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_flags & B_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_flags & B_INVAL));
	KDASSERT(!(bp->b_flags & B_NOCACHE));
#endif

	if (bp->b_error) {
#ifdef notyet /* Can't currently handle possible dirty buffer reuse */
		/*
		 * XXXpooka: interfaces not fully updated.
		 * Note: this was not enabled in the original patch
		 * against netbsd4 either.  I don't know if the comment
		 * above is true or not.
		 */

		/*
		 * If an error occurs, report the error and leave the
		 * buffer as a delayed write on the LRU queue.
		 * restarting the write would likely result in
		 * an error spinloop, so let it be done harmlessly
		 * by the syncer.
		 */
		bp->b_flags &= ~(B_DONE);
		simple_unlock(&bp->b_interlock);

		if (we->we_error == 0) {
			mutex_enter(&wl->wl_mtx);
			wl->wl_error_count++;
			mutex_exit(&wl->wl_mtx);
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		we->we_error = bp->b_error;
		bp->b_error = 0;
		brelse(bp);
		return;
#else
		/* For now, just mark the log permanently errored out */

		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
#endif
	}

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
	we->we_unsynced_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
	wl->wl_unsynced_bufbytes -= bp->b_bufsize;
#endif

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions.  If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has been successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		    (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			wapbl_free(we);
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
	brelse(bp, 0);
}

/*
 * Write transactions to disk + start I/O for contents
 */
int
wapbl_flush(struct wapbl *wl, int waitfor)
{
	struct buf *bp;
	struct wapbl_entry *we;
	off_t off;
	off_t head;
	off_t tail;
	size_t delta = 0;
	size_t flushsize;
	size_t reserved;
	int error = 0;

	/*
	 * Do a quick check to see if a full flush can be skipped.
	 * This assumes that the flush callback does not need to be called
	 * unless there are other outstanding bufs.
	 */
	if (!waitfor) {
		size_t nbufs;
		mutex_enter(&wl->wl_mtx);	/* XXX need mutex here to
						   protect the KASSERTS */
		nbufs = wl->wl_bufcount;
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
		mutex_exit(&wl->wl_mtx);
		if (nbufs == 0)
			return 0;
	}

	/*
	 * XXX we may consider using LK_UPGRADE here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

	/*
	 * Now that we are fully locked and flushed,
	 * do another check for nothing to do.
	 */
	if (wl->wl_bufcount == 0) {
		goto out;
	}

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d flushing entries with "
	    "bufcount=%zu bufbytes=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes));
#endif

	/* Calculate amount of space needed to flush */
	flushsize = wapbl_transaction_len(wl);

	if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
		/*
		 * XXX this could be handled more gracefully, perhaps place
		 * only a partial transaction in the log and allow the
		 * remaining to flush without the protection of the journal.
		 */
		panic("wapbl_flush: current transaction too big to flush\n");
	}

	error = wapbl_truncate(wl, flushsize, 0);
	if (error)
		goto out2;

	off = wl->wl_head;
	KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
	    (off < wl->wl_circ_off + wl->wl_circ_size)));
	error = wapbl_write_blocks(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_revocations(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_inodes(wl, &off);
	if (error)
		goto out2;

	reserved = 0;
	if (wl->wl_inohashcnt)
		reserved = wapbl_transaction_inodes_len(wl);

	head = wl->wl_head;
	tail = wl->wl_tail;

	wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
	    &head, &tail);
#ifdef WAPBL_DEBUG
	if (head != off) {
		panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
		      " off=%"PRIdMAX" flush=%zu\n",
		      (intmax_t)head, (intmax_t)tail, (intmax_t)off,
		      flushsize);
	}
#else
	KASSERT(head == off);
#endif

	/* Opportunistically move the tail forward if we can */
	if (!wapbl_lazy_truncate) {
		mutex_enter(&wl->wl_mtx);
		delta = wl->wl_reclaimable_bytes;
		mutex_exit(&wl->wl_mtx);
		wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
		    &head, &tail);
	}

	error = wapbl_write_commit(wl, head, tail);
	if (error)
		goto out2;

	/* poolme? or kmemme? */
	we = wapbl_calloc(1, sizeof(*we));

#ifdef WAPBL_DEBUG_BUFBYTES
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
		 " unsynced=%zu"
		 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
		 "inodes=%d\n",
		 curproc->p_pid, curlwp->l_lid, flushsize, delta,
		 wapbl_space_used(wl->wl_circ_size, head, tail),
		 wl->wl_unsynced_bufbytes, wl->wl_bufcount,
		 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
		 wl->wl_inohashcnt));
#else
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
		 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
		 "inodes=%d\n",
		 curproc->p_pid, curlwp->l_lid, flushsize, delta,
		 wapbl_space_used(wl->wl_circ_size, head, tail),
		 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
		 wl->wl_dealloccnt, wl->wl_inohashcnt));
#endif


	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);

	wl->wl_reserved_bytes = reserved;
	wl->wl_head = head;
	wl->wl_tail = tail;
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	wl->wl_dealloccnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
	wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
#endif

	we->we_wapbl = wl;
	we->we_bufcount = wl->wl_bufcount;
#ifdef WAPBL_DEBUG_BUFBYTES
	we->we_unsynced_bufbytes = wl->wl_bufbytes;
#endif
	we->we_reclaimable_bytes = flushsize;
	we->we_error = 0;
	SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);

	/*
	 * This flushes bufs in the reverse order from which they were
	 * queued.  It shouldn't matter, but if we care we could use a
	 * TAILQ instead.  XXX Note they will get put on the lru queue
	 * when they flush, so we might actually want to change this to
	 * preserve order.
	 */
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
			continue;
		}
		bp->b_iodone = wapbl_biodone;
		bp->b_private = we;
		bremfree(bp);
		wapbl_remove_buf_locked(wl, bp);
		mutex_exit(&wl->wl_mtx);
		mutex_exit(&bufcache_lock);
		bawrite(bp);
		mutex_enter(&bufcache_lock);
		mutex_enter(&wl->wl_mtx);
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d done flushing entries...\n",
	    curproc->p_pid, curlwp->l_lid));
#endif

 out:

	/*
	 * If the waitfor flag is set, don't return until everything is
	 * fully flushed and the on disk log is empty.
	 */
	if (waitfor) {
		error = wapbl_truncate(wl, wl->wl_circ_size -
			wl->wl_reserved_bytes, wapbl_lazy_truncate);
	}

 out2:
	if (error) {
		wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
		    wl->wl_dealloclens, wl->wl_dealloccnt);
	}

#ifdef WAPBL_DEBUG_PRINT
	if (error) {
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
		mutex_enter(&wl->wl_mtx);
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n", we->we_bufcount,
			    we->we_reclaimable_bytes, we->we_error));
		}
#endif
		mutex_exit(&wl->wl_mtx);
	}
#endif

	rw_exit(&wl->wl_rwlock);
	return error;
}

/****************************************************************/

void
wapbl_jlock_assert(struct wapbl *wl)
{

	KASSERT(rw_lock_held(&wl->wl_rwlock));
}

void
wapbl_junlock_assert(struct wapbl *wl)
{

	KASSERT(!rw_write_held(&wl->wl_rwlock));
}

/****************************************************************/

/* locks missing */
void
wapbl_print(struct wapbl *wl,
		int full,
		void (*pr)(const char *, ...))
{
	struct buf *bp;
	struct wapbl_entry *we;
	(*pr)("wapbl %p", wl);
	(*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
	      wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
	(*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
	      wl->wl_circ_size, wl->wl_circ_off,
	      (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
	(*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
	      wl->wl_fs_dev_bshift, wl->wl_log_dev_bshift);
#ifdef WAPBL_DEBUG_BUFBYTES
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	      "reserved = %zu errcnt = %d unsynced = %zu\n",
	      wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
	      wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	      wl->wl_error_count, wl->wl_unsynced_bufbytes);
#else
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	      "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
	      wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	      wl->wl_error_count);
#endif
	(*pr)("\tdealloccnt = %d, dealloclim = %d\n",
	      wl->wl_dealloccnt, wl->wl_dealloclim);
	(*pr)("\tinohashcnt = %d, inohashmask = 0x%08lx\n",
	      wl->wl_inohashcnt, wl->wl_inohashmask);
	(*pr)("entries:\n");
	SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
#ifdef WAPBL_DEBUG_BUFBYTES
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
		      "unsynced = %zu\n",
		      we->we_bufcount, we->we_reclaimable_bytes,
		      we->we_error, we->we_unsynced_bufbytes);
#else
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
		      we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
#endif
	}
	if (full) {
		int cnt = 0;
		(*pr)("bufs =");
		LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
			if (!LIST_NEXT(bp, b_wapbllist)) {
				(*pr)(" %p", bp);
			} else if ((++cnt % 6) == 0) {
				(*pr)(" %p,\n\t", bp);
			} else {
				(*pr)(" %p,", bp);
			}
		}
		(*pr)("\n");

		(*pr)("dealloced blks = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i < wl->wl_dealloccnt; i++) {
				(*pr)(" %"PRId64":%d,",
				      wl->wl_deallocblks[i],
				      wl->wl_dealloclens[i]);
				if ((++cnt % 4) == 0) {
					(*pr)("\n\t");
				}
			}
		}
		(*pr)("\n");

		(*pr)("registered inodes = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i <= wl->wl_inohashmask; i++) {
				struct wapbl_ino_head *wih;
				struct wapbl_ino *wi;

				wih = &wl->wl_inohash[i];
				LIST_FOREACH(wi, wih, wi_hash) {
					if (wi->wi_ino == 0)
						continue;
					(*pr)(" %"PRId32"/0%06"PRIo32",",
					    wi->wi_ino, wi->wi_mode);
					if ((++cnt % 4) == 0) {
						(*pr)("\n\t");
					}
				}
			}
			(*pr)("\n");
		}
	}
}

#if defined(WAPBL_DEBUG) || defined(DDB)
void
wapbl_dump(struct wapbl *wl)
{
#if defined(WAPBL_DEBUG)
	if (!wl)
		wl = wapbl_debug_wl;
#endif
	if (!wl)
		return;
	wapbl_print(wl, 1, printf);
}
#endif

/****************************************************************/

void
wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
{

	wapbl_jlock_assert(wl);

	mutex_enter(&wl->wl_mtx);
	/* XXX should eventually instead tie this into resource estimation */
	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
	wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
	wl->wl_dealloclens[wl->wl_dealloccnt] = len;
	wl->wl_dealloccnt++;
	WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
	    ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
	mutex_exit(&wl->wl_mtx);
}

/****************************************************************/

static void
wapbl_inodetrk_init(struct wapbl *wl, u_int size)
{

	wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
	if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
		pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
		    "wapblinopl", &pool_allocator_nointr, IPL_NONE);
	}
}

static void
wapbl_inodetrk_free(struct wapbl *wl)
{

	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_inohashcnt == 0);
	hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
	if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
		pool_destroy(&wapbl_ino_pool);
	}
}

static struct wapbl_ino *
wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	KASSERT(mutex_owned(&wl->wl_mtx));

	wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
	LIST_FOREACH(wi, wih, wi_hash) {
		if (ino == wi->wi_ino)
			return wi;
	}
	return 0;
}

void
wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	wi = pool_get(&wapbl_ino_pool, PR_WAITOK);

	mutex_enter(&wl->wl_mtx);
	if (wapbl_inodetrk_get(wl, ino) == NULL) {
		wi->wi_ino = ino;
		wi->wi_mode = mode;
		wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
		LIST_INSERT_HEAD(wih, wi, wi_hash);
		wl->wl_inohashcnt++;
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_register_inode: ino=%"PRId64"\n", ino));
		mutex_exit(&wl->wl_mtx);
	} else {
		mutex_exit(&wl->wl_mtx);
		pool_put(&wapbl_ino_pool, wi);
	}
}

void
wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino *wi;

	mutex_enter(&wl->wl_mtx);
	wi = wapbl_inodetrk_get(wl, ino);
	if (wi) {
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
		KASSERT(wl->wl_inohashcnt > 0);
		wl->wl_inohashcnt--;
		LIST_REMOVE(wi, wi_hash);
		mutex_exit(&wl->wl_mtx);

		pool_put(&wapbl_ino_pool, wi);
	} else {
		mutex_exit(&wl->wl_mtx);
	}
}

/****************************************************************/

static __inline size_t
wapbl_transaction_inodes_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int iph;

	/* Calculate number of inodes described in an inodelist header */
	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);

	KASSERT(iph > 0);

	return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
}


/* Calculate amount of space a transaction will take on disk */
static size_t
wapbl_transaction_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	size_t len;
	int bph;

	/* Calculate number of blocks described in a blocklist header */
	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	KASSERT(bph > 0);

	len = wl->wl_bcount;
	len += howmany(wl->wl_bufcount, bph)*blocklen;
	len += howmany(wl->wl_dealloccnt, bph)*blocklen;
	len += wapbl_transaction_inodes_len(wl);

	return len;
}
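
/*
 * Illustrative calculation (the header and entry sizes below are
 * assumptions made up for the example; the real values come from the
 * structures in sys/wapbl.h): with blocklen = 512, a 32-byte blocklist
 * header and 16-byte per-block entries give bph = (512 - 32) / 16 = 30.
 * A transaction with bcount = 64 KiB spread over 16 buffers and 5
 * registered deallocations would then need 65536 + 1*512 (blocklist
 * header) + 1*512 (revocation header) bytes, plus the inode-list bytes
 * from wapbl_transaction_inodes_len().
 */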

/*
 * Perform commit operation
 *
 * Note that incrementing the generation number needs to
 * be protected against racing with other invocations
 * of wapbl_commit.  This is ok since this routine
 * is only invoked from wapbl_flush
 */
static int
wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
{
	struct wapbl_wc_header *wc = wl->wl_wc_header;
	struct timespec ts;
	int error;
	int force = 1;

	/* XXX Calc checksum here, instead we do this for now */
	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	wc->wc_head = head;
	wc->wc_tail = tail;
	wc->wc_checksum = 0;
	wc->wc_version = 1;
	getnanotime(&ts);
	wc->wc_time = ts.tv_sec;
	wc->wc_timensec = ts.tv_nsec;

	WAPBL_PRINTF(WAPBL_PRINT_WRITE,
	    ("wapbl_write_commit: head = %"PRIdMAX" tail = %"PRIdMAX"\n",
	    (intmax_t)head, (intmax_t)tail));

	/*
	 * XXX if generation will rollover, then first zero
	 * over second commit header before trying to write both headers.
	 */

	error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
	    wl->wl_logpbn + wc->wc_generation % 2);
	if (error)
		return error;

	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	/*
	 * If the generation number was zero, write it out a second time.
	 * This handles initialization and generation number rollover.
	 */
	if (wc->wc_generation++ == 0) {
		error = wapbl_write_commit(wl, head, tail);
		/*
		 * This panic should be able to be removed if we do the
		 * zero'ing mentioned above, and we are certain to roll
		 * back generation number on failure.
		 */
		if (error)
			panic("wapbl_write_commit: error writing duplicate "
			      "log header: %d\n", error);
	}
	return 0;
}
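
/*
 * Design note (a reading of the code above, not an authoritative format
 * specification): commit headers alternate between the two reserved log
 * blocks according to wc_generation % 2, with a cache flush before and
 * after each header write.  The intent is that one of the two headers
 * always survives a crash intact, so replay can use whichever header
 * carries the newer generation number.
 */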
1880
1881 /* Returns new offset value */
1882 static int
1883 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
1884 {
1885 struct wapbl_wc_blocklist *wc =
1886 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1887 int blocklen = 1<<wl->wl_log_dev_bshift;
1888 int bph;
1889 struct buf *bp;
1890 off_t off = *offp;
1891 int error;
1892
1893 KASSERT(rw_write_held(&wl->wl_rwlock));
1894
1895 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1896 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1897
1898 bp = LIST_FIRST(&wl->wl_bufs);
1899
1900 while (bp) {
1901 int cnt;
1902 struct buf *obp = bp;
1903
1904 KASSERT(bp->b_flags & B_LOCKED);
1905
1906 wc->wc_type = WAPBL_WC_BLOCKS;
1907 wc->wc_len = blocklen;
1908 wc->wc_blkcount = 0;
1909 while (bp && (wc->wc_blkcount < bph)) {
1910 /*
1911 * Make sure all the physical block numbers are up to
1912 * date. If this is not always true on a given
1913 * filesystem, then VOP_BMAP must be called. We
1914 * could call VOP_BMAP here, or else in the filesystem
1915 * specific flush callback, although neither of those
1916 * solutions allow us to take the vnode lock. If a
1917 * filesystem requires that we must take the vnode lock
1918 * to call VOP_BMAP, then we can probably do it in
1919 * bwrite when the vnode lock should already be held
1920 * by the invoking code.
1921 */
1922 KASSERT((bp->b_vp->v_type == VBLK) ||
1923 (bp->b_blkno != bp->b_lblkno));
1924 KASSERT(bp->b_blkno > 0);
1925
1926 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
1927 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
1928 wc->wc_len += bp->b_bcount;
1929 wc->wc_blkcount++;
1930 bp = LIST_NEXT(bp, b_wapbllist);
1931 }
1932 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1933 ("wapbl_write_blocks: len = %u off = %"PRIdMAX"\n",
1934 wc->wc_len, (intmax_t)off));
1935
1936 error = wapbl_circ_write(wl, wc, blocklen, &off);
1937 if (error)
1938 return error;
1939 bp = obp;
1940 cnt = 0;
1941 while (bp && (cnt++ < bph)) {
1942 error = wapbl_circ_write(wl, bp->b_data,
1943 bp->b_bcount, &off);
1944 if (error)
1945 return error;
1946 bp = LIST_NEXT(bp, b_wapbllist);
1947 }
1948 }
1949 *offp = off;
1950 return 0;
1951 }
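
/*
 * Per iteration the loop above emits one blocklist header followed by
 * up to bph buffer payloads, so the log ends up laid out as:
 *
 *	[blocklist hdr][buf0 data]...[bufN data][blocklist hdr][...]...
 *
 * wapbl_circ_write is assumed to wrap at the end of the circular area,
 * mirroring wapbl_circ_read below, so a record may straddle the wrap
 * point.
 */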
1952
1953 static int
1954 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
1955 {
1956 struct wapbl_wc_blocklist *wc =
1957 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1958 int i;
1959 int blocklen = 1<<wl->wl_log_dev_bshift;
1960 int bph;
1961 off_t off = *offp;
1962 int error;
1963
1964 if (wl->wl_dealloccnt == 0)
1965 return 0;
1966
1967 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1968 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1969
1970 i = 0;
1971 while (i < wl->wl_dealloccnt) {
1972 wc->wc_type = WAPBL_WC_REVOCATIONS;
1973 wc->wc_len = blocklen;
1974 wc->wc_blkcount = 0;
1975 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
1976 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
1977 wl->wl_deallocblks[i];
1978 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
1979 wl->wl_dealloclens[i];
1980 wc->wc_blkcount++;
1981 i++;
1982 }
1983 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1984 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
1985 wc->wc_len, (intmax_t)off));
1986 error = wapbl_circ_write(wl, wc, blocklen, &off);
1987 if (error)
1988 return error;
1989 }
1990 *offp = off;
1991 return 0;
1992 }
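
/*
 * Example of why these records matter: if a journalled metadata block
 * is later deallocated, the revocation written here makes replay
 * (wapbl_replay_prescan below) drop the stale copy from its hashtable,
 * so log recovery never overwrites whatever the block holds by then.
 */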
1993
1994 static int
1995 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
1996 {
1997 struct wapbl_wc_inodelist *wc =
1998 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
1999 int i;
2000 int blocklen = 1<<wl->wl_log_dev_bshift;
2001 off_t off = *offp;
2002 int error;
2003
2004 struct wapbl_ino_head *wih;
2005 struct wapbl_ino *wi;
2006 int iph;
2007
2008 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2009 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2010
2011 i = 0;
2012 wih = &wl->wl_inohash[0];
2013 wi = 0;
2014 do {
2015 wc->wc_type = WAPBL_WC_INODES;
2016 wc->wc_len = blocklen;
2017 wc->wc_inocnt = 0;
2018 wc->wc_clear = (i == 0);
2019 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2020 while (!wi) {
2021 KASSERT((wih - &wl->wl_inohash[0])
2022 <= wl->wl_inohashmask);
2023 wi = LIST_FIRST(wih++);
2024 }
2025 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2026 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2027 wc->wc_inocnt++;
2028 i++;
2029 wi = LIST_NEXT(wi, wi_hash);
2030 }
2031 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2032 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2033 wc->wc_len, (intmax_t)off));
2034 error = wapbl_circ_write(wl, wc, blocklen, &off);
2035 if (error)
2036 return error;
2037 } while (i < wl->wl_inohashcnt);
2038
2039 *offp = off;
2040 return 0;
2041 }
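
/*
 * Note the wc_clear protocol above: only the first inode record of a
 * pass sets wc_clear, so on replay (wapbl_replay_prescan and
 * wapbl_replay_get_inodes below) any inode list accumulated from an
 * older commit is discarded as soon as a newer list begins.
 */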
2042
2043 #endif /* _KERNEL */
2044
2045 /****************************************************************/
2046
2047 #ifdef _KERNEL
2048 static struct pool wapbl_blk_pool;
2049 static int wapbl_blk_pool_refcount;
2050 #endif
2051 struct wapbl_blk {
2052 LIST_ENTRY(wapbl_blk) wb_hash;
2053 daddr_t wb_blk;
2054 off_t wb_off; /* Offset of this block in the log */
2055 };
2056 #define WAPBL_BLKPOOL_MIN 83
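
/*
 * Each wapbl_blk entry maps a file system device block (wb_blk) to the
 * log offset of its most recently journalled copy (wb_off).  For
 * example, if block 1234 was written in two transactions still in the
 * log, prescan leaves one entry whose wb_off points at the newer
 * payload.
 */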
2057
2058 static void
2059 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2060 {
2061 if (size < WAPBL_BLKPOOL_MIN)
2062 size = WAPBL_BLKPOOL_MIN;
2063 KASSERT(wr->wr_blkhash == 0);
2064 #ifdef _KERNEL
2065 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2066 if (atomic_inc_uint_nv(&wapbl_blk_pool_refcount) == 1) {
2067 pool_init(&wapbl_blk_pool, sizeof(struct wapbl_blk), 0, 0, 0,
2068 "wapblblkpl", &pool_allocator_nointr, IPL_NONE);
2069 }
2070 #else /* ! _KERNEL */
2071 /* Manually implement hashinit */
2072 {
2073 int i;
2074 unsigned long hashsize;
2075 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2076 continue;
2077 wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
2078 for (i = 0; i < hashsize; i++)
2079 LIST_INIT(&wr->wr_blkhash[i]);
2080 wr->wr_blkhashmask = hashsize - 1;
2081 }
2082 #endif /* ! _KERNEL */
2083 }
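
/*
 * Worked example of the sizing above: a request for 83 entries
 * (WAPBL_BLKPOOL_MIN) rounds up to the next power of two, hashsize =
 * 128, so wr_blkhashmask = 127 and buckets are selected with
 * "blk & wr_blkhashmask" as in wapbl_blkhash_get().
 */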
2084
2085 static void
2086 wapbl_blkhash_free(struct wapbl_replay *wr)
2087 {
2088 KASSERT(wr->wr_blkhashcnt == 0);
2089 #ifdef _KERNEL
2090 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2091 if (atomic_dec_uint_nv(&wapbl_blk_pool_refcount) == 0) {
2092 pool_destroy(&wapbl_blk_pool);
2093 }
2094 #else /* ! _KERNEL */
2095 wapbl_free(wr->wr_blkhash);
2096 #endif /* ! _KERNEL */
2097 }
2098
2099 static struct wapbl_blk *
2100 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2101 {
2102 struct wapbl_blk_head *wbh;
2103 struct wapbl_blk *wb;
2104 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2105 LIST_FOREACH(wb, wbh, wb_hash) {
2106 if (blk == wb->wb_blk)
2107 return wb;
2108 }
2109 return 0;
2110 }
2111
2112 static void
2113 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2114 {
2115 struct wapbl_blk_head *wbh;
2116 struct wapbl_blk *wb;
2117 wb = wapbl_blkhash_get(wr, blk);
2118 if (wb) {
2119 KASSERT(wb->wb_blk == blk);
2120 wb->wb_off = off;
2121 } else {
2122 #ifdef _KERNEL
2123 wb = pool_get(&wapbl_blk_pool, PR_WAITOK);
2124 #else /* ! _KERNEL */
2125 wb = wapbl_malloc(sizeof(*wb));
2126 #endif /* ! _KERNEL */
2127 wb->wb_blk = blk;
2128 wb->wb_off = off;
2129 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2130 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2131 wr->wr_blkhashcnt++;
2132 }
2133 }
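
/*
 * Usage sketch (hypothetical values): re-inserting an existing block
 * updates its stored offset in place instead of adding a duplicate, so
 * the table always points at the newest copy in the log.
 */
#if 0
	wapbl_blkhash_ins(wr, 1234, off_old);
	wapbl_blkhash_ins(wr, 1234, off_new);	/* updates, no duplicate */
	KASSERT(wapbl_blkhash_get(wr, 1234)->wb_off == off_new);
#endif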
2134
2135 static void
2136 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2137 {
2138 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2139 if (wb) {
2140 KASSERT(wr->wr_blkhashcnt > 0);
2141 wr->wr_blkhashcnt--;
2142 LIST_REMOVE(wb, wb_hash);
2143 #ifdef _KERNEL
2144 pool_put(&wapbl_blk_pool, wb);
2145 #else /* ! _KERNEL */
2146 wapbl_free(wb);
2147 #endif /* ! _KERNEL */
2148 }
2149 }
2150
2151 static void
2152 wapbl_blkhash_clear(struct wapbl_replay *wr)
2153 {
2154 int i;
2155 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2156 struct wapbl_blk *wb;
2157
2158 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2159 KASSERT(wr->wr_blkhashcnt > 0);
2160 wr->wr_blkhashcnt--;
2161 LIST_REMOVE(wb, wb_hash);
2162 #ifdef _KERNEL
2163 pool_put(&wapbl_blk_pool, wb);
2164 #else /* ! _KERNEL */
2165 wapbl_free(wb);
2166 #endif /* ! _KERNEL */
2167 }
2168 }
2169 KASSERT(wr->wr_blkhashcnt == 0);
2170 }
2171
2172 /****************************************************************/
2173
2174 static int
2175 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2176 {
2177 size_t slen;
2178 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2179 off_t off = *offp;
2180 int error;
2181
2182 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2183 wc->wc_log_dev_bshift) == len);
2184 if (off < wc->wc_circ_off)
2185 off = wc->wc_circ_off;
2186 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2187 if (slen < len) {
2188 error = wapbl_read(data, slen, wr->wr_devvp,
2189 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2190 if (error)
2191 return error;
2192 data = (uint8_t *)data + slen;
2193 len -= slen;
2194 off = wc->wc_circ_off;
2195 }
2196 error = wapbl_read(data, len, wr->wr_devvp,
2197 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2198 if (error)
2199 return error;
2200 off += len;
2201 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2202 off = wc->wc_circ_off;
2203 *offp = off;
2204 return 0;
2205 }
2206
2207 static void
2208 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2209 {
2210 size_t slen;
2211 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2212 off_t off = *offp;
2213
2214 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2215 wc->wc_log_dev_bshift) == len);
2216
2217 if (off < wc->wc_circ_off)
2218 off = wc->wc_circ_off;
2219 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2220 if (slen < len) {
2221 len -= slen;
2222 off = wc->wc_circ_off;
2223 }
2224 off += len;
2225 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2226 off = wc->wc_circ_off;
2227 *offp = off;
2228 }
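
/*
 * Worked wraparound example for the two routines above (numbers are
 * illustrative): with wc_circ_off = 1024 and wc_circ_size = 8192 the
 * circular area covers [1024, 9216).  A read of len = 1024 at
 * off = 8704 computes slen = 1024 + 8192 - 8704 = 512 < len, so it
 * reads 512 bytes at 8704, wraps to 1024, reads the remaining 512
 * bytes there, and returns *offp = 1536.
 */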
2229
2230 /****************************************************************/
2231
2232 int
2233 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2234 daddr_t off, size_t count, size_t blksize)
2235 {
2236 struct wapbl_replay *wr;
2237 int error;
2238 struct vnode *devvp;
2239 daddr_t logpbn;
2240 uint8_t *scratch;
2241 struct wapbl_wc_header *wch;
2242 struct wapbl_wc_header *wch2;
2243 /* Use this until we read the actual log header */
2244 int log_dev_bshift = DEV_BSHIFT;
2245 size_t used;
2246
2247 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2248 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2249 vp, off, count, blksize));
2250
2251 if (off < 0)
2252 return EINVAL;
2253
2254 if (blksize < DEV_BSIZE)
2255 return EINVAL;
2256 if (blksize % DEV_BSIZE)
2257 return EINVAL;
2258
2259 #ifdef _KERNEL
2260 #if 0
2261 /* XXX vp->v_size isn't reliably set for VBLK devices,
2262 * especially root. However, we might still want to verify
2263 * that the full load is readable */
2264 if ((off + count) * blksize > vp->v_size)
2265 return EINVAL;
2266 #endif
2267
2268 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2269 return error;
2270 }
2271 #else /* ! _KERNEL */
2272 devvp = vp;
2273 logpbn = off;
2274 #endif /* ! _KERNEL */
2275
2276 scratch = wapbl_malloc(MAXBSIZE);
2277
2278 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
2279 if (error)
2280 goto errout;
2281
2282 wch = (struct wapbl_wc_header *)scratch;
2283 wch2 =
2284 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2285 /* XXX verify checksums and magic numbers */
2286 if (wch->wc_type != WAPBL_WC_HEADER) {
2287 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2288 error = EFTYPE;
2289 goto errout;
2290 }
2291
2292 if (wch2->wc_generation > wch->wc_generation)
2293 wch = wch2;
2294
2295 wr = wapbl_calloc(1, sizeof(*wr));
2296
2297 wr->wr_logvp = vp;
2298 wr->wr_devvp = devvp;
2299 wr->wr_logpbn = logpbn;
2300
2301 wr->wr_scratch = scratch;
2302
2303 memcpy(&wr->wr_wc_header, wch, sizeof(wr->wr_wc_header));
2304
2305 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2306
2307 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2308 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2309 " len=%"PRId64" used=%zu\n",
2310 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2311 wch->wc_circ_size, used));
2312
2313 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2314 error = wapbl_replay_prescan(wr);
2315 if (error) {
2316 wapbl_replay_stop(wr);
2317 wapbl_replay_free(wr);
2318 return error;
2319 }
2320
2321 error = wapbl_replay_get_inodes(wr);
2322 if (error) {
2323 wapbl_replay_stop(wr);
2324 wapbl_replay_free(wr);
2325 return error;
2326 }
2327
2328 *wrp = wr;
2329 return 0;
2330
2331 errout:
2332 wapbl_free(scratch);
2333 return error;
2334 }
2335
2336 void
2337 wapbl_replay_stop(struct wapbl_replay *wr)
2338 {
2339
2340 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2341
2342 KDASSERT(wapbl_replay_isopen(wr));
2343
2344 wapbl_free(wr->wr_scratch);
2345 wr->wr_scratch = 0;
2346
2347 wr->wr_logvp = 0;
2348
2349 wapbl_blkhash_clear(wr);
2350 wapbl_blkhash_free(wr);
2351 }
2352
2353 void
2354 wapbl_replay_free(struct wapbl_replay *wr)
2355 {
2356
2357 KDASSERT(!wapbl_replay_isopen(wr));
2358
2359 if (wr->wr_inodes)
2360 wapbl_free(wr->wr_inodes);
2361 wapbl_free(wr);
2362 }
2363
2364 int
2365 wapbl_replay_isopen1(struct wapbl_replay *wr)
2366 {
2367
2368 return wapbl_replay_isopen(wr);
2369 }
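
/*
 * Typical mount-time use of the replay interface (a sketch only; error
 * handling is elided and logvp/logstart/logcount/blksize/fsdevvp are
 * illustrative names):
 */
#if 0
	struct wapbl_replay *wr;
	int error;

	error = wapbl_replay_start(&wr, logvp, logstart, logcount, blksize);
	if (error == 0) {
		error = wapbl_replay_write(wr, fsdevvp);
		wapbl_replay_stop(wr);
		wapbl_replay_free(wr);
	}
#endif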
2370
2371 static int
2372 wapbl_replay_prescan(struct wapbl_replay *wr)
2373 {
2374 off_t off;
2375 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2376 int error;
2377
2378 int logblklen = 1<<wch->wc_log_dev_bshift;
2379 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2380
2381 wapbl_blkhash_clear(wr);
2382
2383 off = wch->wc_tail;
2384 while (off != wch->wc_head) {
2385 struct wapbl_wc_null *wcn;
2386 off_t saveoff = off;
2387 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2388 if (error)
2389 goto errout;
2390 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2391 switch (wcn->wc_type) {
2392 case WAPBL_WC_BLOCKS:
2393 {
2394 struct wapbl_wc_blocklist *wc =
2395 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2396 int i;
2397 for (i = 0; i < wc->wc_blkcount; i++) {
2398 int j, n;
2399 /*
2400 * Enter each physical block into the
2401 * hashtable independently
2402 */
2403 n = wc->wc_blocks[i].wc_dlen >>
2404 wch->wc_fs_dev_bshift;
2405 for (j = 0; j < n; j++) {
2406 wapbl_blkhash_ins(wr,
2407 wc->wc_blocks[i].wc_daddr + j,
2408 off);
2409 wapbl_circ_advance(wr,
2410 fsblklen, &off);
2411 }
2412 }
2413 }
2414 break;
2415
2416 case WAPBL_WC_REVOCATIONS:
2417 {
2418 struct wapbl_wc_blocklist *wc =
2419 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2420 int i;
2421 for (i = 0; i < wc->wc_blkcount; i++) {
2422 int j, n;
2423 /*
2424 * Remove any blocks found from the
2425 * hashtable
2426 */
2427 n = wc->wc_blocks[i].wc_dlen >>
2428 wch->wc_fs_dev_bshift;
2429 for (j = 0; j < n; j++) {
2430 wapbl_blkhash_rem(wr,
2431 wc->wc_blocks[i].wc_daddr + j);
2432 }
2433 }
2434 }
2435 break;
2436
2437 case WAPBL_WC_INODES:
2438 {
2439 struct wapbl_wc_inodelist *wc =
2440 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2441 /*
2442 * Keep track of where we found this so we
2443 * can use it later
2444 */
2445 if (wc->wc_clear) {
2446 wr->wr_inodestail = saveoff;
2447 wr->wr_inodescnt = 0;
2448 }
2449 if (wr->wr_inodestail)
2450 wr->wr_inodeshead = off;
2451 wr->wr_inodescnt += wc->wc_inocnt;
2452 }
2453 break;
2454 default:
2455 printf("Unrecognized wapbl type: 0x%08x\n",
2456 wcn->wc_type);
2457 error = EFTYPE;
2458 goto errout;
2459 }
2460 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2461 if (off != saveoff) {
2462 printf("wapbl_replay: corrupted records\n");
2463 error = EFTYPE;
2464 goto errout;
2465 }
2466 }
2467 return 0;
2468
2469 errout:
2470 wapbl_blkhash_clear(wr);
2471 return error;
2472 }
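
/*
 * Prescan walkthrough (illustrative): scanning tail to head, a BLOCKS
 * record covering blocks {7, 8}, a later BLOCKS record covering {8},
 * and a REVOCATIONS record for {7} leave exactly one hashtable entry:
 * block 8, pointing at its second (newer) payload offset.  Block 7 was
 * inserted and then removed again.
 */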
2473
2474 static int
2475 wapbl_replay_get_inodes(struct wapbl_replay *wr)
2476 {
2477 off_t off;
2478 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2479 int logblklen = 1<<wch->wc_log_dev_bshift;
2480 int cnt = 0;
2481
2482 KDASSERT(wapbl_replay_isopen(wr));
2483
2484 if (wr->wr_inodescnt == 0)
2485 return 0;
2486
2487 KASSERT(!wr->wr_inodes);
2488
2489 wr->wr_inodes = wapbl_malloc(wr->wr_inodescnt*sizeof(wr->wr_inodes[0]));
2490
2491 off = wr->wr_inodestail;
2492
2493 while (off != wr->wr_inodeshead) {
2494 struct wapbl_wc_null *wcn;
2495 int error;
2496 off_t saveoff = off;
2497 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2498 if (error) {
2499 wapbl_free(wr->wr_inodes);
2500 wr->wr_inodes = 0;
2501 return error;
2502 }
2503 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2504 switch (wcn->wc_type) {
2505 case WAPBL_WC_BLOCKS:
2506 case WAPBL_WC_REVOCATIONS:
2507 break;
2508 case WAPBL_WC_INODES:
2509 {
2510 struct wapbl_wc_inodelist *wc =
2511 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2512 /*
2513 * Keep track of where we found this so we
2514 * can use it later
2515 */
2516 if (wc->wc_clear) {
2517 cnt = 0;
2518 }
2519 /* This memcpy assumes that wr_inodes is
2520 * laid out the same as wc_inodes. */
2521 memcpy(&wr->wr_inodes[cnt], wc->wc_inodes,
2522 wc->wc_inocnt*sizeof(wc->wc_inodes[0]));
2523 cnt += wc->wc_inocnt;
2524 }
2525 break;
2526 default:
2527 KASSERT(0);
2528 }
2529 off = saveoff;
2530 wapbl_circ_advance(wr, wcn->wc_len, &off);
2531 }
2532 KASSERT(cnt == wr->wr_inodescnt);
2533 return 0;
2534 }
2535
2536 #ifdef DEBUG
2537 int
2538 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2539 {
2540 off_t off;
2541 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2542 int mismatchcnt = 0;
2543 int logblklen = 1<<wch->wc_log_dev_bshift;
2544 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2545 void *scratch1 = wapbl_malloc(MAXBSIZE);
2546 void *scratch2 = wapbl_malloc(MAXBSIZE);
2547 int error = 0;
2548
2549 KDASSERT(wapbl_replay_isopen(wr));
2550
2551 off = wch->wc_tail;
2552 while (off != wch->wc_head) {
2553 struct wapbl_wc_null *wcn;
2554 #ifdef DEBUG
2555 off_t saveoff = off;
2556 #endif
2557 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2558 if (error)
2559 goto out;
2560 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2561 switch (wcn->wc_type) {
2562 case WAPBL_WC_BLOCKS:
2563 {
2564 struct wapbl_wc_blocklist *wc =
2565 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2566 int i;
2567 for (i = 0; i < wc->wc_blkcount; i++) {
2568 int foundcnt = 0;
2569 int dirtycnt = 0;
2570 int j, n;
2571 /*
2572 * Check each physical block against the
2573 * hashtable independently
2574 */
2575 n = wc->wc_blocks[i].wc_dlen >>
2576 wch->wc_fs_dev_bshift;
2577 for (j = 0; j < n; j++) {
2578 struct wapbl_blk *wb =
2579 wapbl_blkhash_get(wr,
2580 wc->wc_blocks[i].wc_daddr + j);
2581 if (wb && (wb->wb_off == off)) {
2582 foundcnt++;
2583 error =
2584 wapbl_circ_read(wr,
2585 scratch1, fsblklen,
2586 &off);
2587 if (error)
2588 goto out;
2589 error =
2590 wapbl_read(scratch2,
2591 fsblklen, fsdevvp,
2592 wb->wb_blk);
2593 if (error)
2594 goto out;
2595 if (memcmp(scratch1,
2596 scratch2,
2597 fsblklen)) {
2598 printf(
2599 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2600 wb->wb_blk, (intmax_t)off);
2601 dirtycnt++;
2602 mismatchcnt++;
2603 }
2604 } else {
2605 wapbl_circ_advance(wr,
2606 fsblklen, &off);
2607 }
2608 }
2609 #if 0
2610 /*
2611 * If all of the blocks in an entry
2612 * are clean, then remove all of its
2613 * blocks from the hashtable since they
2614 * never will need replay.
2615 */
2616 if ((foundcnt != 0) &&
2617 (dirtycnt == 0)) {
2618 off = saveoff;
2619 wapbl_circ_advance(wr,
2620 logblklen, &off);
2621 for (j = 0; j < n; j++) {
2622 struct wapbl_blk *wb =
2623 wapbl_blkhash_get(wr,
2624 wc->wc_blocks[i].wc_daddr + j);
2625 if (wb &&
2626 (wb->wb_off == off)) {
2627 wapbl_blkhash_rem(wr, wb->wb_blk);
2628 }
2629 wapbl_circ_advance(wr,
2630 fsblklen, &off);
2631 }
2632 }
2633 #endif
2634 }
2635 }
2636 break;
2637 case WAPBL_WC_REVOCATIONS:
2638 case WAPBL_WC_INODES:
2639 break;
2640 default:
2641 KASSERT(0);
2642 }
2643 #ifdef DEBUG
2644 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2645 KASSERT(off == saveoff);
2646 #endif
2647 }
2648 out:
2649 wapbl_free(scratch1);
2650 wapbl_free(scratch2);
2651 if (!error && mismatchcnt)
2652 error = EFTYPE;
2653 return error;
2654 }
2655 #endif
2656
2657 int
2658 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2659 {
2660 off_t off;
2661 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2662 int logblklen = 1<<wch->wc_log_dev_bshift;
2663 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2664 void *scratch1 = wapbl_malloc(MAXBSIZE);
2665 int error = 0;
2666
2667 KDASSERT(wapbl_replay_isopen(wr));
2668
2669 /*
2670 * This parses the journal for replay, although it could
2671 * just as easily walk the hashtable instead.
2672 */
2673
2674 off = wch->wc_tail;
2675 while (off != wch->wc_head) {
2676 struct wapbl_wc_null *wcn;
2677 #ifdef DEBUG
2678 off_t saveoff = off;
2679 #endif
2680 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2681 if (error)
2682 goto out;
2683 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2684 switch (wcn->wc_type) {
2685 case WAPBL_WC_BLOCKS:
2686 {
2687 struct wapbl_wc_blocklist *wc =
2688 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2689 int i;
2690 for (i = 0; i < wc->wc_blkcount; i++) {
2691 int j, n;
2692 /*
2693 * Check each physical block against
2694 * the hashtable independently
2695 */
2696 n = wc->wc_blocks[i].wc_dlen >>
2697 wch->wc_fs_dev_bshift;
2698 for (j = 0; j < n; j++) {
2699 struct wapbl_blk *wb =
2700 wapbl_blkhash_get(wr,
2701 wc->wc_blocks[i].wc_daddr + j);
2702 if (wb && (wb->wb_off == off)) {
2703 error = wapbl_circ_read(
2704 wr, scratch1,
2705 fsblklen, &off);
2706 if (error)
2707 goto out;
2708 error =
2709 wapbl_write(scratch1,
2710 fsblklen, fsdevvp,
2711 wb->wb_blk);
2712 if (error)
2713 goto out;
2714 } else {
2715 wapbl_circ_advance(wr,
2716 fsblklen, &off);
2717 }
2718 }
2719 }
2720 }
2721 break;
2722 case WAPBL_WC_REVOCATIONS:
2723 case WAPBL_WC_INODES:
2724 break;
2725 default:
2726 KASSERT(0);
2727 }
2728 #ifdef DEBUG
2729 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2730 KASSERT(off == saveoff);
2731 #endif
2732 }
2733 out:
2734 wapbl_free(scratch1);
2735 return error;
2736 }
2737
2738 int
2739 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2740 {
2741 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2742 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2743
2744 KDASSERT(wapbl_replay_isopen(wr));
2745
2746 KASSERT((len % fsblklen) == 0);
2747
2748 while (len != 0) {
2749 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2750 if (wb) {
2751 off_t off = wb->wb_off;
2752 int error;
2753 error = wapbl_circ_read(wr, data, fsblklen, &off);
2754 if (error)
2755 return error;
2756 }
2757 data = (uint8_t *)data + fsblklen;
2758 len -= fsblklen;
2759 blk++;
2760 }
2761 return 0;
2762 }
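
/*
 * Overlay semantics (sketch): blocks without a journal entry are left
 * untouched above, so a caller presumably fills the buffer from the
 * file system device first and lets wapbl_replay_read() patch in the
 * newer journalled copies, e.g.:
 */
#if 0
	error = wapbl_read(buf, len, fsdevvp, blk);	/* on-disk contents */
	if (error == 0)
		error = wapbl_replay_read(wr, buf, blk, len);
#endif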
2763