/*	$NetBSD: vfs_wapbl.c,v 1.3.8.1.4.1 2011/05/20 08:11:27 matt Exp $	*/

/*-
 * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements file system independent write-ahead logging.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.3.8.1.4.1 2011/05/20 08:11:27 matt Exp $");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/wapbl.h>

#if WAPBL_UVM_ALLOC
#include <uvm/uvm.h>
#endif

#include <miscfs/specfs/specdev.h>

MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
#define	wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
#define	wapbl_free(a) free((a), M_WAPBL)
#define	wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO)

#else /* !_KERNEL */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <sys/time.h>
#include <sys/wapbl.h>

#define	KDASSERT(x) assert(x)
#define	KASSERT(x) assert(x)
#define	wapbl_malloc(s) malloc(s)
#define	wapbl_free(a) free(a)
#define	wapbl_calloc(n, s) calloc((n), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
struct wapbl {
	struct vnode *wl_logvp;	/* r: log here */
	struct vnode *wl_devvp;	/* r: log on this device */
	struct mount *wl_mount;	/* r: mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r: Physical block number of start of log */
	int wl_log_dev_bshift;	/* r: logarithm of device block size of log
				   device */
	int wl_fs_dev_bshift;	/* r: logarithm of device block size of
				   filesystem device */

	unsigned wl_lock_count;	/* m: Count of transactions in progress */

	size_t wl_circ_size;	/* r: Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r: Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r: Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r: Number of buf bytes reserved for log */

	off_t wl_head;		/* l: Byte offset of log head */
	off_t wl_tail;		/* l: Byte offset of log tail */
	/*
	 * head == tail == 0 means log is empty
	 * head == tail != 0 means log is full
	 * See the assertions in wapbl_advance() for other boundary
	 * conditions.  Only truncate moves the tail, except when flush
	 * sets it to wl_header_size; only flush moves the head, except
	 * when truncate sets it to 0.
	 */

	struct wapbl_wc_header *wl_wc_header;	/* l */
	void *wl_wc_scratch;	/* l: scratch space (XXX: why?!?) */

	kmutex_t wl_mtx;	/* u: short-term lock */
	krwlock_t wl_rwlock;	/* u: File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
	wapbl_flush_fn_t wl_flush;	/* r */
	wapbl_flush_fn_t wl_flush_abort;/* r */

	size_t wl_bufbytes;	/* m: Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m: Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m: Total bcount of wl_bufs */

	LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes;	/* m: Amount of space available for
					   reclamation by truncate */
	int wl_error_count;	/* m: # of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
#endif

	daddr_t *wl_deallocblks;/* l: address of block */
	int *wl_dealloclens;	/* l: size of block (fragments, remember) */
	int wl_dealloccnt;	/* l: total count */
	int wl_dealloclim;	/* l: max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
						   accounting */
};

#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
#endif /* _KERNEL */

static int wapbl_replay_prescan(struct wapbl_replay *wr);
static int wapbl_replay_get_inodes(struct wapbl_replay *wr);

static __inline size_t wapbl_space_free(size_t avail, off_t head,
	off_t tail);
static __inline size_t wapbl_space_used(size_t avail, off_t head,
	off_t tail);

#ifdef _KERNEL

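/*
 * Size of the inode-tracking hash table.  83 is prime; a prime bucket
 * count presumably helps spread inode numbers evenly across the hash
 * chains.
 */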
#define	WAPBL_INODETRK_SIZE 83
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

/*
 * This is useful for debugging.  If set, the log will
 * only be truncated when necessary.
 */
int wapbl_lazy_truncate = 0;

struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};
void
wapbl_init(void)
{

	malloc_type_attach(M_WAPBL);
}

int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = DEV_BSHIFT;
	int fs_dev_bshift = DEV_BSHIFT;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
			("wapbl: log device's block size cannot be larger "
			 "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return ENOSPC;
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	LIST_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of the log device block size */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;

	/*
	 * wl_bufbytes_max limits the size of the in memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
	 *   it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
	 * Therefore it must be a multiple of the least common multiple of
	 * those three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */
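	/*
	 * Example: with PAGE_SHIFT = 12 and both bshift values equal to 9,
	 * the shift pairs below round down to a multiple of 4096, then 512,
	 * then 512 again, the same result as rounding once by the largest
	 * of the three shifts.
	 */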

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (nbuf / 2) * 1024;

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);

#if WAPBL_UVM_ALLOC
	wl->wl_deallocblks = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	KASSERT(wl->wl_deallocblks != NULL);
	wl->wl_dealloclens = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
	KASSERT(wl->wl_dealloclens != NULL);
#else
	wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
	    wl->wl_dealloclim);
	wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
	    wl->wl_dealloclim);
#endif

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1<<wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_malloc(len);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		int i;

		WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
		    ("wapbl_start: reusing log with %d inodes\n",
		    wr->wr_inodescnt));

		/*
		 * It's only valid to reuse the replay log if it's
		 * the same as the new log we just opened.
		 */
		KDASSERT(!wapbl_replay_isopen(wr));
		KASSERT(devvp->v_rdev == wr->wr_devvp->v_rdev);
		KASSERT(logpbn == wr->wr_logpbn);
		KASSERT(wl->wl_circ_size == wr->wr_wc_header.wc_circ_size);
		KASSERT(wl->wl_circ_off == wr->wr_wc_header.wc_circ_off);
		KASSERT(wl->wl_log_dev_bshift ==
		    wr->wr_wc_header.wc_log_dev_bshift);
		KASSERT(wl->wl_fs_dev_bshift ==
		    wr->wr_wc_header.wc_fs_dev_bshift);

		wl->wl_wc_header->wc_generation =
		    wr->wr_wc_header.wc_generation + 1;

		for (i = 0; i < wr->wr_inodescnt; i++)
			wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
			    wr->wr_inodes[i].wr_imode);

		/* Make sure new transaction won't overwrite old inodes list */
		KDASSERT(wapbl_transaction_len(wl) <=
		    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
		    wr->wr_inodestail));

		wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
		wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
		    wapbl_transaction_len(wl);

		error = wapbl_write_inodes(wl, &wl->wl_head);
		if (error)
			goto errout;

		KASSERT(wl->wl_head != wl->wl_tail);
		KASSERT(wl->wl_head != 0);
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);
	wapbl_free(wl);

	return error;
}

/*
 * Like wapbl_flush, but discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

#ifdef WAPBL_DEBUG_PRINT
	{
		struct wapbl_entry *we;
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}
	}

	/* Discard list of deallocs */
	wl->wl_dealloccnt = 0;
	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	struct vnode *vp;
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return EBUSY;
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	vp = wl->wl_logvp;

	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl);

	return 0;
}

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
	struct buf *bp;
	int error;

	KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
	KASSERT(devvp->v_type == VBLK);

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(&devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(&devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY; /* silly & dubious */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%x failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}

/*
 * Write len bytes of data to the circular log at byte offset *offp,
 * handling log wraparound; on success *offp is advanced to the offset
 * for the next write.
 */
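/*
 * Example (illustrative numbers only): with wl_circ_off = 1024 and
 * wl_circ_size = 8192 the log occupies byte offsets [1024, 9216); a
 * 512-byte write at *offp = 9000 is split into 216 bytes at 9000
 * followed by the remaining 296 bytes at 1024, leaving *offp = 1320.
 */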
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;

	KDASSERT(((len >> wl->wl_log_dev_bshift) <<
	    wl->wl_log_dev_bshift) == len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		error = wapbl_write(data, slen, wl->wl_devvp,
		    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	error = wapbl_write(data, len, wl->wl_devvp,
	    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}

/****************************************************************/

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;

	KDASSERT(wl);

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
		   wl->wl_bufbytes_max / 2) ||
		  ((wl->wl_bufcount + (lockcount * 10)) >
		   wl->wl_bufcount_max / 2) ||
		  (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
	mutex_exit(&wl->wl_mtx);

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, RW_READER);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}

void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		LIST_REMOVE(bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked but dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	   ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	LIST_REMOVE(bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */

/****************************************************************/
/* Some utility inlines */

/*
 * Advance the circular-log pointer "old" by "delta" bytes, wrapping
 * within the region [off, off + size).  A value of 0 is reserved to
 * mean "empty log".
 */
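/*
 * Example (same illustrative numbers as wapbl_circ_write above): with
 * size = 8192 and off = 1024, advancing old = 9000 by delta = 512
 * wraps to 9512 - 8192 = 1320; advancing old = 0 (the empty log) by
 * 512 yields off + 512 = 1536.
 */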
static __inline off_t
wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
{
	off_t new;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= size);
	KASSERT((old == 0) || (old >= off));
	KASSERT(old < (size + off));

	if ((old == 0) && (delta != 0))
		new = off + delta;
	else if ((old + delta) < (size + off))
		new = old + delta;
	else
		new = (old + delta) - size;

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (new == old));
	KASSERT((delta == 0) || (new != 0));
	KASSERT((delta != (size)) || (new == old));

	/* Define acceptable ranges for output. */
	KASSERT((new == 0) || (new >= off));
	KASSERT(new < (size + off));
	return new;
}

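/*
 * wapbl_space_used() below treats head == tail == 0 as the empty log
 * (0 bytes used); otherwise the modular expression counts the bytes
 * from tail up to head.  Example: avail = 8192, head = 1320 and
 * tail = 9000 gives ((1320 + 8191 - 9000) % 8192) + 1 = 512 bytes in
 * use, while head == tail != 0 gives the full 8192.
 */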
static __inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail == 0) {
		KASSERT(head == 0);
		return 0;
	}
	return ((head + (avail - 1) - tail) % avail) + 1;
}

static __inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{

	return avail - wapbl_space_used(avail, head, tail);
}

static __inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_free(size, head, tail));
	head = wapbl_advance(size, off, head, delta);
	if ((tail == 0) && (head != 0))
		tail = off;
	*headp = head;
	*tailp = tail;
}

static __inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_used(size, head, tail));
	tail = wapbl_advance(size, off, tail, delta);
	if (head == tail) {
		head = tail = 0;
	}
	*headp = head;
	*tailp = tail;
}
#ifdef _KERNEL

/****************************************************************/

/*
 * Remove transactions whose buffers are completely flushed to disk.
 * Will block until at least minfree space is available.
 * Only intended to be called from inside wapbl_flush and therefore
 * does not protect against commit races with itself or with flush.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
	    wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	if (waitonly)
		return 0;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush.  This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}

/****************************************************************/

void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;

	/*
	 * Handle possible flushing of buffers after log has been
	 * decommissioned.
	 */
	if (!wl) {
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
		we->we_unsynced_bufbytes -= bp->b_bufsize;
#endif

		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_flags & B_DONE);
	KDASSERT(!(bp->b_flags & B_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_flags & B_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_flags & B_INVAL));
	KDASSERT(!(bp->b_flags & B_NOCACHE));
#endif

	if (bp->b_error) {
#ifdef notyet /* Can't currently handle possible dirty buffer reuse */
		XXXpooka: interfaces not fully updated
		Note: this was not enabled in the original patch
		against netbsd4 either.  I don't know if comment
		above is true or not.

		/*
		 * If an error occurs, report the error and leave the
		 * buffer as a delayed write on the LRU queue.
		 * restarting the write would likely result in
		 * an error spinloop, so let it be done harmlessly
		 * by the syncer.
		 */
		bp->b_flags &= ~(B_DONE);
		simple_unlock(&bp->b_interlock);

		if (we->we_error == 0) {
			mutex_enter(&wl->wl_mtx);
			wl->wl_error_count++;
			mutex_exit(&wl->wl_mtx);
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		we->we_error = bp->b_error;
		bp->b_error = 0;
		brelse(bp);
		return;
#else
		/* For now, just mark the log permanently errored out */

		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
#endif
	}

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
	we->we_unsynced_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
	wl->wl_unsynced_bufbytes -= bp->b_bufsize;
#endif

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions.  If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has been successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		    (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			wapbl_free(we);
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
	brelse(bp, 0);
}

/*
 * Write transactions to disk + start I/O for contents
 */
int
wapbl_flush(struct wapbl *wl, int waitfor)
{
	struct buf *bp;
	struct wapbl_entry *we;
	off_t off;
	off_t head;
	off_t tail;
	size_t delta = 0;
	size_t flushsize;
	size_t reserved;
	int error = 0;

	/*
	 * Do a quick check to see if a full flush can be skipped
	 * This assumes that the flush callback does not need to be called
	 * unless there are other outstanding bufs.
	 */
	if (!waitfor) {
		size_t nbufs;
		mutex_enter(&wl->wl_mtx);	/* XXX need mutex here to
						   protect the KASSERTS */
		nbufs = wl->wl_bufcount;
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
		mutex_exit(&wl->wl_mtx);
		if (nbufs == 0)
			return 0;
	}

	/*
	 * XXX we may consider using LK_UPGRADE here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

	/*
	 * Now that we are fully locked and flushed,
	 * do another check for nothing to do.
	 */
	if (wl->wl_bufcount == 0) {
		goto out;
	}

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d flushing entries with "
	    "bufcount=%zu bufbytes=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes));
#endif

	/* Calculate amount of space needed to flush */
	flushsize = wapbl_transaction_len(wl);

	if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
		/*
		 * XXX this could be handled more gracefully, perhaps place
		 * only a partial transaction in the log and allow the
		 * remaining to flush without the protection of the journal.
		 */
		panic("wapbl_flush: current transaction too big to flush\n");
	}

	error = wapbl_truncate(wl, flushsize, 0);
	if (error)
		goto out2;

	off = wl->wl_head;
	KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
	    (off < wl->wl_circ_off + wl->wl_circ_size)));
	error = wapbl_write_blocks(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_revocations(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_inodes(wl, &off);
	if (error)
		goto out2;

	reserved = 0;
	if (wl->wl_inohashcnt)
		reserved = wapbl_transaction_inodes_len(wl);

	head = wl->wl_head;
	tail = wl->wl_tail;

	wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
	    &head, &tail);
#ifdef WAPBL_DEBUG
	if (head != off) {
		panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
		      " off=%"PRIdMAX" flush=%zu\n",
		      (intmax_t)head, (intmax_t)tail, (intmax_t)off,
		      flushsize);
	}
#else
	KASSERT(head == off);
#endif

	/* Opportunistically move the tail forward if we can */
	if (!wapbl_lazy_truncate) {
		mutex_enter(&wl->wl_mtx);
		delta = wl->wl_reclaimable_bytes;
		mutex_exit(&wl->wl_mtx);
		wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
		    &head, &tail);
	}

	error = wapbl_write_commit(wl, head, tail);
	if (error)
		goto out2;

	/* poolme? or kmemme? */
	we = wapbl_calloc(1, sizeof(*we));

#ifdef WAPBL_DEBUG_BUFBYTES
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
		 " unsynced=%zu"
		 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
		 "inodes=%d\n",
		 curproc->p_pid, curlwp->l_lid, flushsize, delta,
		 wapbl_space_used(wl->wl_circ_size, head, tail),
		 wl->wl_unsynced_bufbytes, wl->wl_bufcount,
		 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
		 wl->wl_inohashcnt));
#else
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
		 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
		 "inodes=%d\n",
		 curproc->p_pid, curlwp->l_lid, flushsize, delta,
		 wapbl_space_used(wl->wl_circ_size, head, tail),
		 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
		 wl->wl_dealloccnt, wl->wl_inohashcnt));
#endif


	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);

	wl->wl_reserved_bytes = reserved;
	wl->wl_head = head;
	wl->wl_tail = tail;
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	wl->wl_dealloccnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
	wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
#endif

	we->we_wapbl = wl;
	we->we_bufcount = wl->wl_bufcount;
#ifdef WAPBL_DEBUG_BUFBYTES
	we->we_unsynced_bufbytes = wl->wl_bufbytes;
#endif
	we->we_reclaimable_bytes = flushsize;
	we->we_error = 0;
	SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);

	/*
	 * This flushes bufs in reverse order from the order in which they
	 * were queued.  It shouldn't matter, but if we care we could use
	 * a TAILQ instead.  XXX Note they will get put on the lru queue
	 * when they flush so we might actually want to change this to
	 * preserve order.
	 */
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
			continue;
		}
		bp->b_iodone = wapbl_biodone;
		bp->b_private = we;
		bremfree(bp);
		wapbl_remove_buf_locked(wl, bp);
		mutex_exit(&wl->wl_mtx);
		mutex_exit(&bufcache_lock);
		bawrite(bp);
		mutex_enter(&bufcache_lock);
		mutex_enter(&wl->wl_mtx);
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d done flushing entries...\n",
	    curproc->p_pid, curlwp->l_lid));
#endif

 out:

	/*
	 * If the waitfor flag is set, don't return until everything is
	 * fully flushed and the on disk log is empty.
	 */
	if (waitfor) {
		error = wapbl_truncate(wl, wl->wl_circ_size -
			wl->wl_reserved_bytes, wapbl_lazy_truncate);
	}

 out2:
	if (error) {
		wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
		    wl->wl_dealloclens, wl->wl_dealloccnt);
	}

#ifdef WAPBL_DEBUG_PRINT
	if (error) {
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
		mutex_enter(&wl->wl_mtx);
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n", we->we_bufcount,
			    we->we_reclaimable_bytes, we->we_error));
		}
#endif
		mutex_exit(&wl->wl_mtx);
	}
#endif

	rw_exit(&wl->wl_rwlock);
	return error;
}

/****************************************************************/

void
wapbl_jlock_assert(struct wapbl *wl)
{

	KASSERT(rw_lock_held(&wl->wl_rwlock));
}

void
wapbl_junlock_assert(struct wapbl *wl)
{

	KASSERT(!rw_write_held(&wl->wl_rwlock));
}

/****************************************************************/

/* locks missing */
void
wapbl_print(struct wapbl *wl,
		int full,
		void (*pr)(const char *, ...))
{
	struct buf *bp;
	struct wapbl_entry *we;
	(*pr)("wapbl %p", wl);
	(*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
	      wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
	(*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
	      wl->wl_circ_size, wl->wl_circ_off,
	      (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
	(*pr)("log_dev_bshift = %d, fs_dev_bshift = %d\n",
	      wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
#ifdef WAPBL_DEBUG_BUFBYTES
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	      "reserved = %zu errcnt = %d unsynced = %zu\n",
	      wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
	      wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	      wl->wl_error_count, wl->wl_unsynced_bufbytes);
#else
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	      "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
	      wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	      wl->wl_error_count);
#endif
	(*pr)("\tdealloccnt = %d, dealloclim = %d\n",
	      wl->wl_dealloccnt, wl->wl_dealloclim);
	(*pr)("\tinohashcnt = %d, inohashmask = 0x%08lx\n",
	      wl->wl_inohashcnt, wl->wl_inohashmask);
	(*pr)("entries:\n");
	SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
#ifdef WAPBL_DEBUG_BUFBYTES
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
		      "unsynced = %zu\n",
		      we->we_bufcount, we->we_reclaimable_bytes,
		      we->we_error, we->we_unsynced_bufbytes);
#else
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
		      we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
#endif
	}
	if (full) {
		int cnt = 0;
		(*pr)("bufs =");
		LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
			if (!LIST_NEXT(bp, b_wapbllist)) {
				(*pr)(" %p", bp);
			} else if ((++cnt % 6) == 0) {
				(*pr)(" %p,\n\t", bp);
			} else {
				(*pr)(" %p,", bp);
			}
		}
		(*pr)("\n");

		(*pr)("dealloced blks = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i < wl->wl_dealloccnt; i++) {
				(*pr)(" %"PRId64":%d,",
				      wl->wl_deallocblks[i],
				      wl->wl_dealloclens[i]);
				if ((++cnt % 4) == 0) {
					(*pr)("\n\t");
				}
			}
		}
		(*pr)("\n");

		(*pr)("registered inodes = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i <= wl->wl_inohashmask; i++) {
				struct wapbl_ino_head *wih;
				struct wapbl_ino *wi;

				wih = &wl->wl_inohash[i];
				LIST_FOREACH(wi, wih, wi_hash) {
					if (wi->wi_ino == 0)
						continue;
					(*pr)(" %"PRId32"/0%06"PRIo32",",
					    wi->wi_ino, wi->wi_mode);
					if ((++cnt % 4) == 0) {
						(*pr)("\n\t");
					}
				}
			}
			(*pr)("\n");
		}
	}
}

#if defined(WAPBL_DEBUG) || defined(DDB)
void
wapbl_dump(struct wapbl *wl)
{
#if defined(WAPBL_DEBUG)
	if (!wl)
		wl = wapbl_debug_wl;
#endif
	if (!wl)
		return;
	wapbl_print(wl, 1, printf);
}
#endif

/****************************************************************/

void
wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
{

	wapbl_jlock_assert(wl);

	/* XXX should eventually instead tie this into resource estimation */
	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
	wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
	wl->wl_dealloclens[wl->wl_dealloccnt] = len;
	wl->wl_dealloccnt++;
	WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
	    ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
}

/****************************************************************/

static void
wapbl_inodetrk_init(struct wapbl *wl, u_int size)
{

	wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
	if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
		pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
		    "wapblinopl", &pool_allocator_nointr, IPL_NONE);
	}
}

static void
wapbl_inodetrk_free(struct wapbl *wl)
{

	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_inohashcnt == 0);
	hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
	if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
		pool_destroy(&wapbl_ino_pool);
	}
}

static struct wapbl_ino *
wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	KASSERT(mutex_owned(&wl->wl_mtx));

	wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
	LIST_FOREACH(wi, wih, wi_hash) {
		if (ino == wi->wi_ino)
			return wi;
	}
	return 0;
}

void
wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	wi = pool_get(&wapbl_ino_pool, PR_WAITOK);

	mutex_enter(&wl->wl_mtx);
	if (wapbl_inodetrk_get(wl, ino) == NULL) {
		wi->wi_ino = ino;
		wi->wi_mode = mode;
		wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
		LIST_INSERT_HEAD(wih, wi, wi_hash);
		wl->wl_inohashcnt++;
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_register_inode: ino=%"PRId64"\n", ino));
		mutex_exit(&wl->wl_mtx);
	} else {
		mutex_exit(&wl->wl_mtx);
		pool_put(&wapbl_ino_pool, wi);
	}
}

void
wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino *wi;

	mutex_enter(&wl->wl_mtx);
	wi = wapbl_inodetrk_get(wl, ino);
	if (wi) {
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
		KASSERT(wl->wl_inohashcnt > 0);
		wl->wl_inohashcnt--;
		LIST_REMOVE(wi, wi_hash);
		mutex_exit(&wl->wl_mtx);

		pool_put(&wapbl_ino_pool, wi);
	} else {
		mutex_exit(&wl->wl_mtx);
	}
}

/****************************************************************/

static __inline size_t
wapbl_transaction_inodes_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int iph;

	/* Calculate number of inodes described in an inodelist header */
	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);

	KASSERT(iph > 0);

	return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
}


/* Calculate amount of space a transaction will take on disk */
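/*
 * The length below is the raw buffer data (wl_bcount) plus one
 * blocklist header block per bph buffer records, one revocation
 * header block per bph deallocation records, and the inode list
 * accounted above.  Illustrative example: if bph were 13, flushing
 * 20 buffers would cost two header blocks plus the buffers' data.
 */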
static size_t
wapbl_transaction_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	size_t len;
	int bph;

	/* Calculate number of blocks described in a blocklist header */
	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	KASSERT(bph > 0);

	len = wl->wl_bcount;
	len += howmany(wl->wl_bufcount, bph)*blocklen;
	len += howmany(wl->wl_dealloccnt, bph)*blocklen;
	len += wapbl_transaction_inodes_len(wl);

	return len;
}

/*
 * Perform commit operation
 *
 * Note that the generation number increment needs to
 * be protected against racing with other invocations
 * of wapbl_write_commit.  This is ok since this routine
 * is only invoked from wapbl_flush
 */
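/*
 * The commit header is written to one of the two on-disk header blocks
 * reserved at the start of the log, alternating on generation parity
 * (note the "wc_generation % 2" below); the idea being that a failed
 * commit write still leaves the previous commit header intact.
 */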
static int
wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
{
	struct wapbl_wc_header *wc = wl->wl_wc_header;
	struct timespec ts;
	int error;
	int force = 1;

	/* XXX Calculate checksum here; instead we do this for now */
	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	wc->wc_head = head;
	wc->wc_tail = tail;
	wc->wc_checksum = 0;
	wc->wc_version = 1;
	getnanotime(&ts);
	wc->wc_time = ts.tv_sec;
	wc->wc_timensec = ts.tv_nsec;

	WAPBL_PRINTF(WAPBL_PRINT_WRITE,
	    ("wapbl_write_commit: head = %"PRIdMAX " tail = %"PRIdMAX"\n",
	    (intmax_t)head, (intmax_t)tail));

	/*
	 * XXX if generation will rollover, then first zero
	 * over second commit header before trying to write both headers.
	 */

	error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
	    wl->wl_logpbn + wc->wc_generation % 2);
	if (error)
		return error;

	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	/*
	 * If the generation number was zero, write it out a second time.
	 * This handles initialization and generation number rollover
	 */
	if (wc->wc_generation++ == 0) {
		error = wapbl_write_commit(wl, head, tail);
		/*
		 * This panic should be able to be removed if we do the
		 * zero'ing mentioned above, and we are certain to roll
		 * back generation number on failure.
		 */
		if (error)
			panic("wapbl_write_commit: error writing duplicate "
			    "log header: %d\n", error);
	}
	return 0;
}
1877
1878 /* Returns new offset value */
1879 static int
1880 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
1881 {
1882 struct wapbl_wc_blocklist *wc =
1883 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1884 int blocklen = 1<<wl->wl_log_dev_bshift;
1885 int bph;
1886 struct buf *bp;
1887 off_t off = *offp;
1888 int error;
1889
1890 KASSERT(rw_write_held(&wl->wl_rwlock));
1891
1892 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1893 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1894
1895 bp = LIST_FIRST(&wl->wl_bufs);
1896
1897 while (bp) {
1898 int cnt;
1899 struct buf *obp = bp;
1900
1901 KASSERT(bp->b_flags & B_LOCKED);
1902
1903 wc->wc_type = WAPBL_WC_BLOCKS;
1904 wc->wc_len = blocklen;
1905 wc->wc_blkcount = 0;
1906 while (bp && (wc->wc_blkcount < bph)) {
1907 			/*
1908 			 * Make sure all the physical block numbers are up to
1909 			 * date.  If this is not always true on a given
1910 			 * filesystem, then VOP_BMAP must be called.  We
1911 			 * could call VOP_BMAP here, or else in the
1912 			 * filesystem-specific flush callback, although neither
1913 			 * of those solutions allows us to take the vnode lock.
1914 			 * If a filesystem requires the vnode lock to be held
1915 			 * when calling VOP_BMAP, we can probably do that in
1916 			 * bwrite, where the vnode lock should already be held
1917 			 * by the invoking code.
1918 			 */
1919 KASSERT((bp->b_vp->v_type == VBLK) ||
1920 (bp->b_blkno != bp->b_lblkno));
1921 KASSERT(bp->b_blkno > 0);
1922
1923 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
1924 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
1925 wc->wc_len += bp->b_bcount;
1926 wc->wc_blkcount++;
1927 bp = LIST_NEXT(bp, b_wapbllist);
1928 }
1929 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1930 ("wapbl_write_blocks: len = %u off = %"PRIdMAX"\n",
1931 wc->wc_len, (intmax_t)off));
1932
1933 error = wapbl_circ_write(wl, wc, blocklen, &off);
1934 if (error)
1935 return error;
1936 bp = obp;
1937 cnt = 0;
1938 while (bp && (cnt++ < bph)) {
1939 error = wapbl_circ_write(wl, bp->b_data,
1940 bp->b_bcount, &off);
1941 if (error)
1942 return error;
1943 bp = LIST_NEXT(bp, b_wapbllist);
1944 }
1945 }
1946 *offp = off;
1947 return 0;
1948 }
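
/*
 * Editor's note (addition): each iteration of the outer loop above emits
 * one descriptor block followed by the raw data of up to bph buffers, so
 * the log comes to hold records shaped like
 *
 *	[WAPBL_WC_BLOCKS descriptor][buf 0 data]...[buf bph-1 data]
 *	[WAPBL_WC_BLOCKS descriptor][buf bph data]...
 *
 * and wc_len in each descriptor covers the descriptor block plus the data
 * following it, which is what lets replay skip a whole record at once via
 * wapbl_circ_advance(wr, wcn->wc_len, ...).
 */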
1949
1950 static int
1951 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
1952 {
1953 struct wapbl_wc_blocklist *wc =
1954 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1955 int i;
1956 int blocklen = 1<<wl->wl_log_dev_bshift;
1957 int bph;
1958 off_t off = *offp;
1959 int error;
1960
1961 if (wl->wl_dealloccnt == 0)
1962 return 0;
1963
1964 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1965 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1966
1967 i = 0;
1968 while (i < wl->wl_dealloccnt) {
1969 wc->wc_type = WAPBL_WC_REVOCATIONS;
1970 wc->wc_len = blocklen;
1971 wc->wc_blkcount = 0;
1972 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
1973 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
1974 wl->wl_deallocblks[i];
1975 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
1976 wl->wl_dealloclens[i];
1977 wc->wc_blkcount++;
1978 i++;
1979 }
1980 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1981 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
1982 wc->wc_len, (intmax_t)off));
1983 error = wapbl_circ_write(wl, wc, blocklen, &off);
1984 if (error)
1985 return error;
1986 }
1987 *offp = off;
1988 return 0;
1989 }
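
/*
 * Editor's note (addition): unlike wapbl_write_blocks(), the revocation
 * records written above reuse the blocklist layout but are followed by no
 * data; wc_len stays at exactly one log block, and replay uses only the
 * (wc_daddr, wc_dlen) pairs to delete stale entries from its hashtable.
 */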
1990
1991 static int
1992 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
1993 {
1994 struct wapbl_wc_inodelist *wc =
1995 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
1996 int i;
1997 int blocklen = 1<<wl->wl_log_dev_bshift;
1998 off_t off = *offp;
1999 int error;
2000
2001 struct wapbl_ino_head *wih;
2002 struct wapbl_ino *wi;
2003 int iph;
2004
2005 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2006 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2007
2008 i = 0;
2009 wih = &wl->wl_inohash[0];
2010 wi = 0;
2011 do {
2012 wc->wc_type = WAPBL_WC_INODES;
2013 wc->wc_len = blocklen;
2014 wc->wc_inocnt = 0;
2015 wc->wc_clear = (i == 0);
2016 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2017 while (!wi) {
2018 KASSERT((wih - &wl->wl_inohash[0])
2019 <= wl->wl_inohashmask);
2020 wi = LIST_FIRST(wih++);
2021 }
2022 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2023 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2024 wc->wc_inocnt++;
2025 i++;
2026 wi = LIST_NEXT(wi, wi_hash);
2027 }
2028 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2029 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2030 wc->wc_len, (intmax_t)off));
2031 error = wapbl_circ_write(wl, wc, blocklen, &off);
2032 if (error)
2033 return error;
2034 } while (i < wl->wl_inohashcnt);
2035
2036 *offp = off;
2037 return 0;
2038 }
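
/*
 * Illustrative sketch (editor's addition, compiled out): the wih/wi walk
 * above visits every inode hashtable entry exactly once by advancing to
 * the next bucket whenever the current chain runs out.  The same pattern
 * in isolation, under a hypothetical helper name:
 */
#if 0
static int
wapbl_example_inohash_walk(struct wapbl *wl)
{
	struct wapbl_ino_head *wih = &wl->wl_inohash[0];
	struct wapbl_ino *wi = NULL;
	int visited = 0;

	while (visited < wl->wl_inohashcnt) {
		while (wi == NULL)
			wi = LIST_FIRST(wih++);	/* skip empty buckets */
		/* consume (wi->wi_ino, wi->wi_mode) here */
		visited++;
		wi = LIST_NEXT(wi, wi_hash);
	}
	return visited;
}
#endif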
2039
2040 #endif /* _KERNEL */
2041
2042 /****************************************************************/
2043
2044 #ifdef _KERNEL
2045 static struct pool wapbl_blk_pool;
2046 static int wapbl_blk_pool_refcount;
2047 #endif
2048 struct wapbl_blk {
2049 LIST_ENTRY(wapbl_blk) wb_hash;
2050 daddr_t wb_blk;
2051 off_t wb_off; /* Offset of this block in the log */
2052 };
2053 #define WAPBL_BLKPOOL_MIN 83
2054
2055 static void
2056 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2057 {
2058 if (size < WAPBL_BLKPOOL_MIN)
2059 size = WAPBL_BLKPOOL_MIN;
2060 KASSERT(wr->wr_blkhash == 0);
2061 #ifdef _KERNEL
2062 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2063 if (atomic_inc_uint_nv(&wapbl_blk_pool_refcount) == 1) {
2064 pool_init(&wapbl_blk_pool, sizeof(struct wapbl_blk), 0, 0, 0,
2065 "wapblblkpl", &pool_allocator_nointr, IPL_NONE);
2066 }
2067 #else /* ! _KERNEL */
2068 /* Manually implement hashinit */
2069 {
2070 int i;
2071 unsigned long hashsize;
2072 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2073 continue;
2074 wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
2075 for (i = 0; i < hashsize; i++)
2076 LIST_INIT(&wr->wr_blkhash[i]);
2077 wr->wr_blkhashmask = hashsize - 1;
2078 }
2079 #endif /* ! _KERNEL */
2080 }
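
/*
 * Illustrative sketch (editor's addition, compiled out): the userland
 * hashinit replacement above rounds the table size up to a power of two
 * so a bucket can be selected with a mask rather than a modulo, exactly
 * as wapbl_blkhash_get() does with (blk & wr->wr_blkhashmask).  The
 * helper name below is hypothetical.
 */
#if 0
static unsigned long
wapbl_example_bucket(daddr_t blk, u_int size)
{
	unsigned long hashsize;

	for (hashsize = 1; hashsize < size; hashsize <<= 1)
		continue;
	return (unsigned long)blk & (hashsize - 1);
}
#endif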
2081
2082 static void
2083 wapbl_blkhash_free(struct wapbl_replay *wr)
2084 {
2085 KASSERT(wr->wr_blkhashcnt == 0);
2086 #ifdef _KERNEL
2087 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2088 if (atomic_dec_uint_nv(&wapbl_blk_pool_refcount) == 0) {
2089 pool_destroy(&wapbl_blk_pool);
2090 }
2091 #else /* ! _KERNEL */
2092 wapbl_free(wr->wr_blkhash);
2093 #endif /* ! _KERNEL */
2094 }
2095
2096 static struct wapbl_blk *
2097 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2098 {
2099 struct wapbl_blk_head *wbh;
2100 struct wapbl_blk *wb;
2101 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2102 LIST_FOREACH(wb, wbh, wb_hash) {
2103 if (blk == wb->wb_blk)
2104 return wb;
2105 }
2106 return 0;
2107 }
2108
2109 static void
2110 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2111 {
2112 struct wapbl_blk_head *wbh;
2113 struct wapbl_blk *wb;
2114 wb = wapbl_blkhash_get(wr, blk);
2115 if (wb) {
2116 KASSERT(wb->wb_blk == blk);
2117 wb->wb_off = off;
2118 } else {
2119 #ifdef _KERNEL
2120 wb = pool_get(&wapbl_blk_pool, PR_WAITOK);
2121 #else /* ! _KERNEL */
2122 wb = wapbl_malloc(sizeof(*wb));
2123 #endif /* ! _KERNEL */
2124 wb->wb_blk = blk;
2125 wb->wb_off = off;
2126 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2127 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2128 wr->wr_blkhashcnt++;
2129 }
2130 }
2131
2132 static void
2133 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2134 {
2135 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2136 if (wb) {
2137 KASSERT(wr->wr_blkhashcnt > 0);
2138 wr->wr_blkhashcnt--;
2139 LIST_REMOVE(wb, wb_hash);
2140 #ifdef _KERNEL
2141 pool_put(&wapbl_blk_pool, wb);
2142 #else /* ! _KERNEL */
2143 wapbl_free(wb);
2144 #endif /* ! _KERNEL */
2145 }
2146 }
2147
2148 static void
2149 wapbl_blkhash_clear(struct wapbl_replay *wr)
2150 {
2151 int i;
2152 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2153 struct wapbl_blk *wb;
2154
2155 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2156 KASSERT(wr->wr_blkhashcnt > 0);
2157 wr->wr_blkhashcnt--;
2158 LIST_REMOVE(wb, wb_hash);
2159 #ifdef _KERNEL
2160 pool_put(&wapbl_blk_pool, wb);
2161 #else /* ! _KERNEL */
2162 wapbl_free(wb);
2163 #endif /* ! _KERNEL */
2164 }
2165 }
2166 KASSERT(wr->wr_blkhashcnt == 0);
2167 }
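
/*
 * Illustrative sketch (editor's addition, compiled out): how replay uses
 * the hashtable above.  Prescan inserts (or overwrites) one entry per
 * file system block, mapping a disk address to the log offset of its
 * newest copy; revocations remove entries; replay looks them up.  The
 * helper name and the block/offset values are hypothetical, and the
 * table is assumed to have been set up with wapbl_blkhash_init().
 */
#if 0
static void
wapbl_example_blkhash_usage(struct wapbl_replay *wr)
{
	struct wapbl_blk *wb;

	wapbl_blkhash_ins(wr, 1234, 8192);	/* copy at log offset 8192 */
	wapbl_blkhash_ins(wr, 1234, 16384);	/* a later copy wins */
	wb = wapbl_blkhash_get(wr, 1234);
	KASSERT(wb != NULL && wb->wb_off == 16384);
	wapbl_blkhash_rem(wr, 1234);		/* e.g. a revocation */
}
#endif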
2168
2169 /****************************************************************/
2170
2171 static int
2172 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2173 {
2174 size_t slen;
2175 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2176 off_t off = *offp;
2177 int error;
2178
2179 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2180 wc->wc_log_dev_bshift) == len);
2181 if (off < wc->wc_circ_off)
2182 off = wc->wc_circ_off;
2183 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2184 if (slen < len) {
2185 error = wapbl_read(data, slen, wr->wr_devvp,
2186 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2187 if (error)
2188 return error;
2189 data = (uint8_t *)data + slen;
2190 len -= slen;
2191 off = wc->wc_circ_off;
2192 }
2193 error = wapbl_read(data, len, wr->wr_devvp,
2194 wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2195 if (error)
2196 return error;
2197 off += len;
2198 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2199 off = wc->wc_circ_off;
2200 *offp = off;
2201 return 0;
2202 }
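
/*
 * Editor's note (addition): a worked example of the wraparound handling
 * above, with hypothetical numbers.  Take wc_circ_off = 1024 and
 * wc_circ_size = 8192, i.e. valid offsets [1024, 9216).  A 2048-byte read
 * starting at offset 8192 has slen = 9216 - 8192 = 1024, so it splits
 * into 1024 bytes at the end of the region followed by the remaining
 * 1024 bytes at offset 1024, leaving *offp at 2048.
 */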
2203
2204 static void
2205 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2206 {
2207 size_t slen;
2208 struct wapbl_wc_header *wc = &wr->wr_wc_header;
2209 off_t off = *offp;
2210
2211 KASSERT(((len >> wc->wc_log_dev_bshift) <<
2212 wc->wc_log_dev_bshift) == len);
2213
2214 if (off < wc->wc_circ_off)
2215 off = wc->wc_circ_off;
2216 slen = wc->wc_circ_off + wc->wc_circ_size - off;
2217 if (slen < len) {
2218 len -= slen;
2219 off = wc->wc_circ_off;
2220 }
2221 off += len;
2222 if (off >= wc->wc_circ_off + wc->wc_circ_size)
2223 off = wc->wc_circ_off;
2224 *offp = off;
2225 }
2226
2227 /****************************************************************/
2228
2229 int
2230 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2231 daddr_t off, size_t count, size_t blksize)
2232 {
2233 struct wapbl_replay *wr;
2234 int error;
2235 struct vnode *devvp;
2236 daddr_t logpbn;
2237 uint8_t *scratch;
2238 struct wapbl_wc_header *wch;
2239 struct wapbl_wc_header *wch2;
2240 /* Use this until we read the actual log header */
2241 int log_dev_bshift = DEV_BSHIFT;
2242 size_t used;
2243
2244 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2245 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2246 vp, off, count, blksize));
2247
2248 if (off < 0)
2249 return EINVAL;
2250
2251 if (blksize < DEV_BSIZE)
2252 return EINVAL;
2253 if (blksize % DEV_BSIZE)
2254 return EINVAL;
2255
2256 #ifdef _KERNEL
2257 #if 0
2258 /* XXX vp->v_size isn't reliably set for VBLK devices,
2259 * especially root. However, we might still want to verify
2260 * that the full load is readable */
2261 if ((off + count) * blksize > vp->v_size)
2262 return EINVAL;
2263 #endif
2264
2265 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2266 return error;
2267 }
2268 #else /* ! _KERNEL */
2269 devvp = vp;
2270 logpbn = off;
2271 #endif /* ! _KERNEL */
2272
2273 scratch = wapbl_malloc(MAXBSIZE);
2274
2275 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
2276 if (error)
2277 goto errout;
2278
2279 wch = (struct wapbl_wc_header *)scratch;
2280 wch2 =
2281 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2282 /* XXX verify checksums and magic numbers */
2283 if (wch->wc_type != WAPBL_WC_HEADER) {
2284 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2285 error = EFTYPE;
2286 goto errout;
2287 }
2288
2289 if (wch2->wc_generation > wch->wc_generation)
2290 wch = wch2;
2291
2292 wr = wapbl_calloc(1, sizeof(*wr));
2293
2294 wr->wr_logvp = vp;
2295 wr->wr_devvp = devvp;
2296 wr->wr_logpbn = logpbn;
2297
2298 wr->wr_scratch = scratch;
2299
2300 memcpy(&wr->wr_wc_header, wch, sizeof(wr->wr_wc_header));
2301
2302 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2303
2304 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2305 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2306 " len=%"PRId64" used=%zu\n",
2307 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2308 wch->wc_circ_size, used));
2309
2310 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2311 error = wapbl_replay_prescan(wr);
2312 if (error) {
2313 wapbl_replay_stop(wr);
2314 wapbl_replay_free(wr);
2315 return error;
2316 }
2317
2318 error = wapbl_replay_get_inodes(wr);
2319 if (error) {
2320 wapbl_replay_stop(wr);
2321 wapbl_replay_free(wr);
2322 return error;
2323 }
2324
2325 *wrp = wr;
2326 return 0;
2327
2328 errout:
2329 wapbl_free(scratch);
2330 return error;
2331 }
2332
2333 void
2334 wapbl_replay_stop(struct wapbl_replay *wr)
2335 {
2336
2337 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2338
2339 KDASSERT(wapbl_replay_isopen(wr));
2340
2341 wapbl_free(wr->wr_scratch);
2342 wr->wr_scratch = 0;
2343
2344 wr->wr_logvp = 0;
2345
2346 wapbl_blkhash_clear(wr);
2347 wapbl_blkhash_free(wr);
2348 }
2349
2350 void
2351 wapbl_replay_free(struct wapbl_replay *wr)
2352 {
2353
2354 KDASSERT(!wapbl_replay_isopen(wr));
2355
2356 if (wr->wr_inodes)
2357 wapbl_free(wr->wr_inodes);
2358 wapbl_free(wr);
2359 }
2360
2361 int
2362 wapbl_replay_isopen1(struct wapbl_replay *wr)
2363 {
2364
2365 return wapbl_replay_isopen(wr);
2366 }
2367
2368 static int
2369 wapbl_replay_prescan(struct wapbl_replay *wr)
2370 {
2371 off_t off;
2372 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2373 int error;
2374
2375 int logblklen = 1<<wch->wc_log_dev_bshift;
2376 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2377
2378 wapbl_blkhash_clear(wr);
2379
2380 off = wch->wc_tail;
2381 while (off != wch->wc_head) {
2382 struct wapbl_wc_null *wcn;
2383 off_t saveoff = off;
2384 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2385 if (error)
2386 goto errout;
2387 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2388 switch (wcn->wc_type) {
2389 case WAPBL_WC_BLOCKS:
2390 {
2391 struct wapbl_wc_blocklist *wc =
2392 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2393 int i;
2394 for (i = 0; i < wc->wc_blkcount; i++) {
2395 int j, n;
2396 /*
2397 * Enter each physical block into the
2398 * hashtable independently
2399 */
2400 n = wc->wc_blocks[i].wc_dlen >>
2401 wch->wc_fs_dev_bshift;
2402 for (j = 0; j < n; j++) {
2403 wapbl_blkhash_ins(wr,
2404 wc->wc_blocks[i].wc_daddr + j,
2405 off);
2406 wapbl_circ_advance(wr,
2407 fsblklen, &off);
2408 }
2409 }
2410 }
2411 break;
2412
2413 case WAPBL_WC_REVOCATIONS:
2414 {
2415 struct wapbl_wc_blocklist *wc =
2416 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2417 int i;
2418 for (i = 0; i < wc->wc_blkcount; i++) {
2419 int j, n;
2420 /*
2421 * Remove any blocks found from the
2422 * hashtable
2423 */
2424 n = wc->wc_blocks[i].wc_dlen >>
2425 wch->wc_fs_dev_bshift;
2426 for (j = 0; j < n; j++) {
2427 wapbl_blkhash_rem(wr,
2428 wc->wc_blocks[i].wc_daddr + j);
2429 }
2430 }
2431 }
2432 break;
2433
2434 case WAPBL_WC_INODES:
2435 {
2436 struct wapbl_wc_inodelist *wc =
2437 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2438 /*
2439 * Keep track of where we found this so we
2440 * can use it later
2441 */
2442 if (wc->wc_clear) {
2443 wr->wr_inodestail = saveoff;
2444 wr->wr_inodescnt = 0;
2445 }
2446 if (wr->wr_inodestail)
2447 wr->wr_inodeshead = off;
2448 wr->wr_inodescnt += wc->wc_inocnt;
2449 }
2450 break;
2451 default:
2452 printf("Unrecognized wapbl type: 0x%08x\n",
2453 wcn->wc_type);
2454 error = EFTYPE;
2455 goto errout;
2456 }
2457 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2458 if (off != saveoff) {
2459 printf("wapbl_replay: corrupted records\n");
2460 error = EFTYPE;
2461 goto errout;
2462 }
2463 }
2464 return 0;
2465
2466 errout:
2467 wapbl_blkhash_clear(wr);
2468 return error;
2469 }
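
/*
 * Illustrative sketch (editor's addition, compiled out): the prescan
 * enters every file system block of a WAPBL_WC_BLOCKS record into the
 * hashtable separately.  With a hypothetical wc_dlen of 8192 and a
 * 2048-byte file system block (wc_fs_dev_bshift = 11), n = 8192 >> 11 = 4,
 * so blocks daddr+0 through daddr+3 each get an entry pointing at
 * consecutive fsblklen-sized chunks of the log.  The helper name is
 * hypothetical.
 */
#if 0
static void
wapbl_example_prescan_entry(struct wapbl_replay *wr, daddr_t daddr,
    uint32_t dlen, int fs_dev_bshift, off_t off)
{
	int fsblklen = 1 << fs_dev_bshift;
	int j, n;

	n = dlen >> fs_dev_bshift;	/* e.g. 8192 >> 11 == 4 */
	for (j = 0; j < n; j++) {
		wapbl_blkhash_ins(wr, daddr + j, off);
		wapbl_circ_advance(wr, fsblklen, &off);
	}
}
#endif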
2470
2471 static int
2472 wapbl_replay_get_inodes(struct wapbl_replay *wr)
2473 {
2474 off_t off;
2475 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2476 int logblklen = 1<<wch->wc_log_dev_bshift;
2477 	int cnt = 0;
2478
2479 KDASSERT(wapbl_replay_isopen(wr));
2480
2481 if (wr->wr_inodescnt == 0)
2482 return 0;
2483
2484 KASSERT(!wr->wr_inodes);
2485
2486 wr->wr_inodes = wapbl_malloc(wr->wr_inodescnt*sizeof(wr->wr_inodes[0]));
2487
2488 off = wr->wr_inodestail;
2489
2490 while (off != wr->wr_inodeshead) {
2491 struct wapbl_wc_null *wcn;
2492 int error;
2493 off_t saveoff = off;
2494 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2495 if (error) {
2496 wapbl_free(wr->wr_inodes);
2497 wr->wr_inodes = 0;
2498 return error;
2499 }
2500 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2501 switch (wcn->wc_type) {
2502 case WAPBL_WC_BLOCKS:
2503 case WAPBL_WC_REVOCATIONS:
2504 break;
2505 case WAPBL_WC_INODES:
2506 {
2507 struct wapbl_wc_inodelist *wc =
2508 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2509 /*
2510 * Keep track of where we found this so we
2511 * can use it later
2512 */
2513 if (wc->wc_clear) {
2514 cnt = 0;
2515 }
2516 /* This memcpy assumes that wr_inodes is
2517 * laid out the same as wc_inodes. */
2518 memcpy(&wr->wr_inodes[cnt], wc->wc_inodes,
2519 wc->wc_inocnt*sizeof(wc->wc_inodes[0]));
2520 cnt += wc->wc_inocnt;
2521 }
2522 break;
2523 default:
2524 KASSERT(0);
2525 }
2526 off = saveoff;
2527 wapbl_circ_advance(wr, wcn->wc_len, &off);
2528 }
2529 KASSERT(cnt == wr->wr_inodescnt);
2530 return 0;
2531 }
2532
2533 #ifdef DEBUG
2534 int
2535 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2536 {
2537 off_t off;
2538 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2539 int mismatchcnt = 0;
2540 int logblklen = 1<<wch->wc_log_dev_bshift;
2541 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2542 void *scratch1 = wapbl_malloc(MAXBSIZE);
2543 void *scratch2 = wapbl_malloc(MAXBSIZE);
2544 int error = 0;
2545
2546 KDASSERT(wapbl_replay_isopen(wr));
2547
2548 off = wch->wc_tail;
2549 while (off != wch->wc_head) {
2550 struct wapbl_wc_null *wcn;
2551 #ifdef DEBUG
2552 off_t saveoff = off;
2553 #endif
2554 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2555 if (error)
2556 goto out;
2557 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2558 switch (wcn->wc_type) {
2559 case WAPBL_WC_BLOCKS:
2560 {
2561 struct wapbl_wc_blocklist *wc =
2562 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2563 int i;
2564 for (i = 0; i < wc->wc_blkcount; i++) {
2565 int foundcnt = 0;
2566 int dirtycnt = 0;
2567 int j, n;
2568 				/*
2569 				 * Check each physical block against the
2570 				 * hashtable independently
2571 				 */
2572 n = wc->wc_blocks[i].wc_dlen >>
2573 wch->wc_fs_dev_bshift;
2574 for (j = 0; j < n; j++) {
2575 struct wapbl_blk *wb =
2576 wapbl_blkhash_get(wr,
2577 wc->wc_blocks[i].wc_daddr + j);
2578 if (wb && (wb->wb_off == off)) {
2579 foundcnt++;
2580 error =
2581 wapbl_circ_read(wr,
2582 scratch1, fsblklen,
2583 &off);
2584 if (error)
2585 goto out;
2586 error =
2587 wapbl_read(scratch2,
2588 fsblklen, fsdevvp,
2589 wb->wb_blk);
2590 if (error)
2591 goto out;
2592 if (memcmp(scratch1,
2593 scratch2,
2594 fsblklen)) {
2595 printf(
2596 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2597 wb->wb_blk, (intmax_t)off);
2598 dirtycnt++;
2599 mismatchcnt++;
2600 }
2601 } else {
2602 wapbl_circ_advance(wr,
2603 fsblklen, &off);
2604 }
2605 }
2606 #if 0
2607 /*
2608 * If all of the blocks in an entry
2609 * are clean, then remove all of its
2610 * blocks from the hashtable since they
2611 * never will need replay.
2612 */
2613 if ((foundcnt != 0) &&
2614 (dirtycnt == 0)) {
2615 off = saveoff;
2616 wapbl_circ_advance(wr,
2617 logblklen, &off);
2618 for (j = 0; j < n; j++) {
2619 struct wapbl_blk *wb =
2620 wapbl_blkhash_get(wr,
2621 wc->wc_blocks[i].wc_daddr + j);
2622 if (wb &&
2623 (wb->wb_off == off)) {
2624 wapbl_blkhash_rem(wr, wb->wb_blk);
2625 }
2626 wapbl_circ_advance(wr,
2627 fsblklen, &off);
2628 }
2629 }
2630 #endif
2631 }
2632 }
2633 break;
2634 case WAPBL_WC_REVOCATIONS:
2635 case WAPBL_WC_INODES:
2636 break;
2637 default:
2638 KASSERT(0);
2639 }
2640 #ifdef DEBUG
2641 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2642 KASSERT(off == saveoff);
2643 #endif
2644 }
2645 out:
2646 wapbl_free(scratch1);
2647 wapbl_free(scratch2);
2648 if (!error && mismatchcnt)
2649 error = EFTYPE;
2650 return error;
2651 }
2652 #endif
2653
2654 int
2655 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2656 {
2657 off_t off;
2658 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2659 int logblklen = 1<<wch->wc_log_dev_bshift;
2660 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2661 void *scratch1 = wapbl_malloc(MAXBSIZE);
2662 int error = 0;
2663
2664 KDASSERT(wapbl_replay_isopen(wr));
2665
2666 /*
2667 * This parses the journal for replay, although it could
2668 * just as easily walk the hashtable instead.
2669 */
2670
2671 off = wch->wc_tail;
2672 while (off != wch->wc_head) {
2673 struct wapbl_wc_null *wcn;
2674 #ifdef DEBUG
2675 off_t saveoff = off;
2676 #endif
2677 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2678 if (error)
2679 goto out;
2680 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2681 switch (wcn->wc_type) {
2682 case WAPBL_WC_BLOCKS:
2683 {
2684 struct wapbl_wc_blocklist *wc =
2685 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2686 int i;
2687 for (i = 0; i < wc->wc_blkcount; i++) {
2688 int j, n;
2689 /*
2690 * Check each physical block against
2691 * the hashtable independently
2692 */
2693 n = wc->wc_blocks[i].wc_dlen >>
2694 wch->wc_fs_dev_bshift;
2695 for (j = 0; j < n; j++) {
2696 struct wapbl_blk *wb =
2697 wapbl_blkhash_get(wr,
2698 wc->wc_blocks[i].wc_daddr + j);
2699 if (wb && (wb->wb_off == off)) {
2700 error = wapbl_circ_read(
2701 wr, scratch1,
2702 fsblklen, &off);
2703 if (error)
2704 goto out;
2705 error =
2706 wapbl_write(scratch1,
2707 fsblklen, fsdevvp,
2708 wb->wb_blk);
2709 if (error)
2710 goto out;
2711 } else {
2712 wapbl_circ_advance(wr,
2713 fsblklen, &off);
2714 }
2715 }
2716 }
2717 }
2718 break;
2719 case WAPBL_WC_REVOCATIONS:
2720 case WAPBL_WC_INODES:
2721 break;
2722 default:
2723 KASSERT(0);
2724 }
2725 #ifdef DEBUG
2726 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2727 KASSERT(off == saveoff);
2728 #endif
2729 }
2730 out:
2731 wapbl_free(scratch1);
2732 return error;
2733 }
2734
2735 int
2736 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2737 {
2738 struct wapbl_wc_header *wch = &wr->wr_wc_header;
2739 int fsblklen = 1<<wch->wc_fs_dev_bshift;
2740
2741 KDASSERT(wapbl_replay_isopen(wr));
2742
2743 KASSERT((len % fsblklen) == 0);
2744
2745 while (len != 0) {
2746 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2747 if (wb) {
2748 off_t off = wb->wb_off;
2749 int error;
2750 error = wapbl_circ_read(wr, data, fsblklen, &off);
2751 if (error)
2752 return error;
2753 }
2754 data = (uint8_t *)data + fsblklen;
2755 len -= fsblklen;
2756 blk++;
2757 }
2758 return 0;
2759 }
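
/*
 * Illustrative sketch (editor's addition, compiled out): a hypothetical
 * caller overlaying pending journal contents on data read from the file
 * system device during replay.  wapbl_replay_read() replaces only those
 * blocks that have a newer copy recorded in the log; blocks with no
 * hashtable entry are left exactly as read.
 */
#if 0
static int
wapbl_example_read_block(struct wapbl_replay *wr, struct vnode *fsdevvp,
    void *buf, daddr_t blk, long len)
{
	int error;

	/* read the on-disk version first... */
	error = wapbl_read(buf, len, fsdevvp, blk);
	if (error)
		return error;
	/* ...then patch in any newer copies recorded in the log */
	return wapbl_replay_read(wr, buf, blk, len);
}
#endif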
2760