1 /* $NetBSD: vfs_wapbl.c,v 1.67 2016/05/03 19:43:45 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * This implements file-system-independent write-ahead logging.
34 */
35
36 #define WAPBL_INTERNAL
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.67 2016/05/03 19:43:45 riastradh Exp $");
40
41 #include <sys/param.h>
42 #include <sys/bitops.h>
43
44 #ifdef _KERNEL
45 #include <sys/param.h>
46 #include <sys/namei.h>
47 #include <sys/proc.h>
48 #include <sys/sysctl.h>
49 #include <sys/uio.h>
50 #include <sys/vnode.h>
51 #include <sys/file.h>
52 #include <sys/module.h>
53 #include <sys/resourcevar.h>
54 #include <sys/conf.h>
55 #include <sys/mount.h>
56 #include <sys/kernel.h>
57 #include <sys/kauth.h>
58 #include <sys/mutex.h>
59 #include <sys/atomic.h>
60 #include <sys/wapbl.h>
61 #include <sys/wapbl_replay.h>
62
63 #include <miscfs/specfs/specdev.h>
64
65 #define wapbl_alloc(s) kmem_alloc((s), KM_SLEEP)
66 #define wapbl_free(a, s) kmem_free((a), (s))
67 #define wapbl_calloc(n, s) kmem_zalloc((n)*(s), KM_SLEEP)
68
69 static struct sysctllog *wapbl_sysctl;
70 static int wapbl_flush_disk_cache = 1;
71 static int wapbl_verbose_commit = 0;
72
73 static inline size_t wapbl_space_free(size_t, off_t, off_t);
74
75 #else /* !_KERNEL */
76 #include <assert.h>
77 #include <errno.h>
78 #include <stdio.h>
79 #include <stdbool.h>
80 #include <stdlib.h>
81 #include <string.h>
82
83 #include <sys/time.h>
84 #include <sys/wapbl.h>
85 #include <sys/wapbl_replay.h>
86
87 #define KDASSERT(x) assert(x)
88 #define KASSERT(x) assert(x)
89 #define wapbl_alloc(s) malloc(s)
90 #define wapbl_free(a, s) free(a)
91 #define wapbl_calloc(n, s) calloc((n), (s))
92
93 #endif /* !_KERNEL */
94
95 /*
96 * INTERNAL DATA STRUCTURES
97 */
98
99 /*
100 * This structure holds per-mount log information.
101 *
102 * Legend: a = atomic access only
103 * r = read-only after init
104 * l = rwlock held
105 * m = mutex held
106 * lm = rwlock held writing or mutex held
107 * u = unlocked access ok
108 * b = bufcache_lock held
109 */
110 LIST_HEAD(wapbl_ino_head, wapbl_ino);
111 struct wapbl {
112 struct vnode *wl_logvp; /* r: log here */
113 struct vnode *wl_devvp; /* r: log on this device */
114 struct mount *wl_mount; /* r: mountpoint wl is associated with */
115 daddr_t wl_logpbn; /* r: Physical block number of start of log */
116 int wl_log_dev_bshift; /* r: logarithm of device block size of log
117 device */
118 int wl_fs_dev_bshift; /* r: logarithm of device block size of
119 filesystem device */
120
121 unsigned wl_lock_count; /* m: Count of transactions in progress */
122
123 size_t wl_circ_size; /* r: Number of bytes in buffer of log */
124 size_t wl_circ_off; /* r: Number of bytes reserved at start */
125
126 size_t wl_bufcount_max; /* r: Number of buffers reserved for log */
127 size_t wl_bufbytes_max; /* r: Number of buf bytes reserved for log */
128
129 off_t wl_head; /* l: Byte offset of log head */
130 off_t wl_tail; /* l: Byte offset of log tail */
131 /*
132 * head == tail == 0 means log is empty
133 * head == tail != 0 means log is full
134 * see assertions in wapbl_advance() for other boundary conditions.
135 * Only truncate moves the tail, except when flush sets it to
136 * wl_header_size.  Only flush moves the head, except when truncate
137 * sets it to 0.
138 */
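
	/*
	 * Illustrative example (hypothetical numbers, not from any real
	 * configuration): with wl_circ_off = 1024 and wl_circ_size = 16384
	 * the log body occupies byte offsets [1024, 17408).  head == tail
	 * == 0 is the empty log; after flushing an 8192 byte transaction
	 * the head advances by 8192 (wrapping back into the window if it
	 * runs past the end) and the tail, if it was 0, is set to
	 * wl_circ_off.  See wapbl_advance_head()/wapbl_advance_tail().
	 */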
139
140 struct wapbl_wc_header *wl_wc_header; /* l */
141 void *wl_wc_scratch; /* l: scratch space (XXX: why?!?) */
142
143 kmutex_t wl_mtx; /* u: short-term lock */
144 krwlock_t wl_rwlock; /* u: File system transaction lock */
145
146 /*
147 * Must be held while accessing
148 * wl_count or wl_bufs or head or tail
149 */
150
151 /*
152 * Callback called from within the flush routine to flush any extra
153 * bits. Note that flush may be skipped without calling this if
154 * there are no outstanding buffers in the transaction.
155 */
156 #if _KERNEL
157 wapbl_flush_fn_t wl_flush; /* r */
158 wapbl_flush_fn_t wl_flush_abort;/* r */
159 #endif
160
161 size_t wl_bufbytes; /* m: Byte count of pages in wl_bufs */
162 size_t wl_bufcount; /* m: Count of buffers in wl_bufs */
163 size_t wl_bcount; /* m: Total bcount of wl_bufs */
164
165 LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */
166
167 kcondvar_t wl_reclaimable_cv; /* m (obviously) */
168 size_t wl_reclaimable_bytes; /* m: Amount of space available for
169 reclamation by truncate */
170 int wl_error_count; /* m: # of wl_entries with errors */
171 size_t wl_reserved_bytes; /* never truncate log smaller than this */
172
173 #ifdef WAPBL_DEBUG_BUFBYTES
174 size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
175 #endif
176
177 daddr_t *wl_deallocblks;/* lm: address of block */
178 int *wl_dealloclens; /* lm: size of block */
179 int wl_dealloccnt; /* lm: total count */
180 int wl_dealloclim; /* l: max count */
181
182 /* hashtable of inode numbers for allocated but unlinked inodes */
183 /* synch ??? */
184 struct wapbl_ino_head *wl_inohash;
185 u_long wl_inohashmask;
186 int wl_inohashcnt;
187
188 SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
189 accounting */
190
191 u_char *wl_buffer; /* l: buffer for wapbl_buffered_write() */
192 daddr_t wl_buffer_dblk; /* l: buffer disk block address */
193 size_t wl_buffer_used; /* l: buffer current use */
194 };
195
196 #ifdef WAPBL_DEBUG_PRINT
197 int wapbl_debug_print = WAPBL_DEBUG_PRINT;
198 #endif
199
200 /****************************************************************/
201 #ifdef _KERNEL
202
203 #ifdef WAPBL_DEBUG
204 struct wapbl *wapbl_debug_wl;
205 #endif
206
207 static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
208 static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
209 static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
210 static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
211 #endif /* _KERNEL */
212
213 static int wapbl_replay_process(struct wapbl_replay *wr, off_t, off_t);
214
215 static inline size_t wapbl_space_used(size_t avail, off_t head,
216 off_t tail);
217
218 #ifdef _KERNEL
219
220 static struct pool wapbl_entry_pool;
221
222 #define WAPBL_INODETRK_SIZE 83
223 static int wapbl_ino_pool_refcount;
224 static struct pool wapbl_ino_pool;
225 struct wapbl_ino {
226 LIST_ENTRY(wapbl_ino) wi_hash;
227 ino_t wi_ino;
228 mode_t wi_mode;
229 };
230
231 static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
232 static void wapbl_inodetrk_free(struct wapbl *wl);
233 static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);
234
235 static size_t wapbl_transaction_len(struct wapbl *wl);
236 static inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);
237
238 #if 0
239 int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
240 #endif
241
242 static int wapbl_replay_isopen1(struct wapbl_replay *);
243
244 /*
245 * This is useful for debugging. If set, the log will
246 * only be truncated when necessary.
247 */
248 int wapbl_lazy_truncate = 0;
249
250 struct wapbl_ops wapbl_ops = {
251 .wo_wapbl_discard = wapbl_discard,
252 .wo_wapbl_replay_isopen = wapbl_replay_isopen1,
253 .wo_wapbl_replay_can_read = wapbl_replay_can_read,
254 .wo_wapbl_replay_read = wapbl_replay_read,
255 .wo_wapbl_add_buf = wapbl_add_buf,
256 .wo_wapbl_remove_buf = wapbl_remove_buf,
257 .wo_wapbl_resize_buf = wapbl_resize_buf,
258 .wo_wapbl_begin = wapbl_begin,
259 .wo_wapbl_end = wapbl_end,
260 .wo_wapbl_junlock_assert= wapbl_junlock_assert,
261
262 /* XXX: the following is only used to say "this is a wapbl buf" */
263 .wo_wapbl_biodone = wapbl_biodone,
264 };
265
266 static int
267 wapbl_sysctl_init(void)
268 {
269 int rv;
270 const struct sysctlnode *rnode, *cnode;
271
272 wapbl_sysctl = NULL;
273
274 rv = sysctl_createv(&wapbl_sysctl, 0, NULL, &rnode,
275 CTLFLAG_PERMANENT,
276 CTLTYPE_NODE, "wapbl",
277 SYSCTL_DESCR("WAPBL journaling options"),
278 NULL, 0, NULL, 0,
279 CTL_VFS, CTL_CREATE, CTL_EOL);
280 if (rv)
281 return rv;
282
283 rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
284 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
285 CTLTYPE_INT, "flush_disk_cache",
286 SYSCTL_DESCR("flush disk cache"),
287 NULL, 0, &wapbl_flush_disk_cache, 0,
288 CTL_CREATE, CTL_EOL);
289 if (rv)
290 return rv;
291
292 rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
293 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
294 CTLTYPE_INT, "verbose_commit",
295 SYSCTL_DESCR("show time and size of wapbl log commits"),
296 NULL, 0, &wapbl_verbose_commit, 0,
297 CTL_CREATE, CTL_EOL);
298 return rv;
299 }
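
/*
 * Usage note (a sketch, not part of the kernel code): the nodes created
 * above hang off CTL_VFS, so the knobs should be visible from userland as
 * vfs.wapbl.* and tunable at run time, e.g.
 *
 *	sysctl -w vfs.wapbl.flush_disk_cache=0
 *	sysctl -w vfs.wapbl.verbose_commit=2
 *
 * verbose_commit >= 2 additionally times each disk cache sync; see
 * wapbl_cache_sync() below.
 */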
300
301 static void
302 wapbl_init(void)
303 {
304
305 pool_init(&wapbl_entry_pool, sizeof(struct wapbl_entry), 0, 0, 0,
306 "wapblentrypl", &pool_allocator_kmem, IPL_VM);
307
308 wapbl_sysctl_init();
309 }
310
311 static int
312 wapbl_fini(bool interface)
313 {
314
315 if (wapbl_sysctl != NULL)
316 sysctl_teardown(&wapbl_sysctl);
317
318 pool_destroy(&wapbl_entry_pool);
319
320 return 0;
321 }
322
323 static int
324 wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
325 {
326 int error, i;
327
328 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
329 ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));
330
331 /*
332 * It's only valid to reuse the replay log if it's
333 * the same as the new log we just opened.
334 */
335 KDASSERT(!wapbl_replay_isopen(wr));
336 KASSERT(wl->wl_devvp->v_type == VBLK);
337 KASSERT(wr->wr_devvp->v_type == VBLK);
338 KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
339 KASSERT(wl->wl_logpbn == wr->wr_logpbn);
340 KASSERT(wl->wl_circ_size == wr->wr_circ_size);
341 KASSERT(wl->wl_circ_off == wr->wr_circ_off);
342 KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
343 KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);
344
345 wl->wl_wc_header->wc_generation = wr->wr_generation + 1;
346
347 for (i = 0; i < wr->wr_inodescnt; i++)
348 wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
349 wr->wr_inodes[i].wr_imode);
350
351 /* Make sure new transaction won't overwrite old inodes list */
352 KDASSERT(wapbl_transaction_len(wl) <=
353 wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
354 wr->wr_inodestail));
355
356 wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
357 wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
358 wapbl_transaction_len(wl);
359
360 error = wapbl_write_inodes(wl, &wl->wl_head);
361 if (error)
362 return error;
363
364 KASSERT(wl->wl_head != wl->wl_tail);
365 KASSERT(wl->wl_head != 0);
366
367 return 0;
368 }
369
370 int
371 wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
372 daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
373 wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
374 {
375 struct wapbl *wl;
376 struct vnode *devvp;
377 daddr_t logpbn;
378 int error;
379 int log_dev_bshift = ilog2(blksize);
380 int fs_dev_bshift = log_dev_bshift;
381 int run;
382
383 WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
384 " count=%zu blksize=%zu\n", vp, off, count, blksize));
385
386 if (log_dev_bshift > fs_dev_bshift) {
387 WAPBL_PRINTF(WAPBL_PRINT_OPEN,
388 ("wapbl: log device's block size cannot be larger "
389 "than filesystem's\n"));
390 /*
391 * Not currently implemented, although it could be if
392 * needed someday.
393 */
394 return ENOSYS;
395 }
396
397 if (off < 0)
398 return EINVAL;
399
400 if (blksize < DEV_BSIZE)
401 return EINVAL;
402 if (blksize % DEV_BSIZE)
403 return EINVAL;
404
405 /* XXXTODO: verify that the full load is writable */
406
407 /*
408 * XXX check for minimum log size
409 * minimum is governed by minimum amount of space
410 * to complete a transaction. (probably truncate)
411 */
412 /* XXX for now pick something minimal */
413 if ((count * blksize) < MAXPHYS) {
414 return ENOSPC;
415 }
416
417 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
418 return error;
419 }
420
421 wl = wapbl_calloc(1, sizeof(*wl));
422 rw_init(&wl->wl_rwlock);
423 mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
424 cv_init(&wl->wl_reclaimable_cv, "wapblrec");
425 LIST_INIT(&wl->wl_bufs);
426 SIMPLEQ_INIT(&wl->wl_entries);
427
428 wl->wl_logvp = vp;
429 wl->wl_devvp = devvp;
430 wl->wl_mount = mp;
431 wl->wl_logpbn = logpbn;
432 wl->wl_log_dev_bshift = log_dev_bshift;
433 wl->wl_fs_dev_bshift = fs_dev_bshift;
434
435 wl->wl_flush = flushfn;
436 wl->wl_flush_abort = flushabortfn;
437
438 /* Reserve two log device blocks for the commit headers */
439 wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
440 wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
441 /* truncate the log usage to a multiple of the log device block size */
442 wl->wl_circ_size >>= wl->wl_log_dev_bshift;
443 wl->wl_circ_size <<= wl->wl_log_dev_bshift;
444
445 /*
446 * wl_bufbytes_max limits the size of the in memory transaction space.
447 * - Since buffers are allocated and accounted for in units of
448 * PAGE_SIZE, it is required to be a multiple of PAGE_SIZE
449 * (i.e. 1<<PAGE_SHIFT).
450 * - Since the log device has to be written in units of
451 * 1<<wl_log_dev_bshift, it is required to be a multiple of
452 * 1<<wl_log_dev_bshift.
453 * - Since the filesystem will provide data in units of
454 * 1<<wl_fs_dev_bshift, it is convenient to be a multiple of that too.
455 * Therefore it must be multiple of the least common multiple of those
456 * three quantities. Fortunately, all of those quantities are
457 * guaranteed to be a power of two, and the least common multiple of
458 * a set of numbers which are all powers of two is simply the maximum
459 * of those numbers. Finally, the maximum logarithm of a power of two
460 * is the same as the log of the maximum power of two. So we can do
461 * the following operations to size wl_bufbytes_max:
462 */
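
	/*
	 * For example (illustrative values only): with PAGE_SHIFT = 12 and
	 * wl_log_dev_bshift = wl_fs_dev_bshift = 9, the least common
	 * multiple of the three alignments is max(1<<12, 1<<9, 1<<9) = 4096,
	 * so the shift pairs below simply round wl_bufbytes_max down to a
	 * 4096-byte multiple.
	 */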
463
464 /* XXX fix actual number of pages reserved per filesystem. */
465 wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);
466
467 /* Round wl_bufbytes_max to the largest power of two constraint */
468 wl->wl_bufbytes_max >>= PAGE_SHIFT;
469 wl->wl_bufbytes_max <<= PAGE_SHIFT;
470 wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
471 wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
472 wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
473 wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
474
475 /* XXX maybe use filesystem fragment size instead of 1024 */
476 /* XXX fix actual number of buffers reserved per filesystem. */
477 wl->wl_bufcount_max = (nbuf / 2) * 1024;
478
479 /* XXX tie this into resource estimation */
480 wl->wl_dealloclim = wl->wl_bufbytes_max / mp->mnt_stat.f_bsize / 2;
481
482 wl->wl_deallocblks = wapbl_alloc(sizeof(*wl->wl_deallocblks) *
483 wl->wl_dealloclim);
484 wl->wl_dealloclens = wapbl_alloc(sizeof(*wl->wl_dealloclens) *
485 wl->wl_dealloclim);
486
487 wl->wl_buffer = wapbl_alloc(MAXPHYS);
488 wl->wl_buffer_used = 0;
489
490 wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);
491
492 /* Initialize the commit header */
493 {
494 struct wapbl_wc_header *wc;
495 size_t len = 1 << wl->wl_log_dev_bshift;
496 wc = wapbl_calloc(1, len);
497 wc->wc_type = WAPBL_WC_HEADER;
498 wc->wc_len = len;
499 wc->wc_circ_off = wl->wl_circ_off;
500 wc->wc_circ_size = wl->wl_circ_size;
501 /* XXX wc->wc_fsid */
502 wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
503 wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
504 wl->wl_wc_header = wc;
505 wl->wl_wc_scratch = wapbl_alloc(len);
506 }
507
508 /*
509 * if there was an existing set of unlinked but
510 * allocated inodes, preserve it in the new
511 * log.
512 */
513 if (wr && wr->wr_inodescnt) {
514 error = wapbl_start_flush_inodes(wl, wr);
515 if (error)
516 goto errout;
517 }
518
519 error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
520 if (error) {
521 goto errout;
522 }
523
524 *wlp = wl;
525 #if defined(WAPBL_DEBUG)
526 wapbl_debug_wl = wl;
527 #endif
528
529 return 0;
530 errout:
531 wapbl_discard(wl);
532 wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
533 wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
534 wapbl_free(wl->wl_deallocblks,
535 sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim);
536 wapbl_free(wl->wl_dealloclens,
537 sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim);
538 wapbl_free(wl->wl_buffer, MAXPHYS);
539 wapbl_inodetrk_free(wl);
540 wapbl_free(wl, sizeof(*wl));
541
542 return error;
543 }
544
545 /*
546 * Like wapbl_flush, but discards the transaction
547 * completely.
548 */
549
550 void
551 wapbl_discard(struct wapbl *wl)
552 {
553 struct wapbl_entry *we;
554 struct buf *bp;
555 int i;
556
557 /*
558 * XXX we may consider using upgrade here
559 * if we want to call flush from inside a transaction
560 */
561 rw_enter(&wl->wl_rwlock, RW_WRITER);
562 wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
563 wl->wl_dealloccnt);
564
565 #ifdef WAPBL_DEBUG_PRINT
566 {
567 pid_t pid = -1;
568 lwpid_t lid = -1;
569 if (curproc)
570 pid = curproc->p_pid;
571 if (curlwp)
572 lid = curlwp->l_lid;
573 #ifdef WAPBL_DEBUG_BUFBYTES
574 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
575 ("wapbl_discard: thread %d.%d discarding "
576 "transaction\n"
577 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
578 "deallocs=%d inodes=%d\n"
579 "\terrcnt = %u, reclaimable=%zu reserved=%zu "
580 "unsynced=%zu\n",
581 pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
582 wl->wl_bcount, wl->wl_dealloccnt,
583 wl->wl_inohashcnt, wl->wl_error_count,
584 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
585 wl->wl_unsynced_bufbytes));
586 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
587 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
588 ("\tentry: bufcount = %zu, reclaimable = %zu, "
589 "error = %d, unsynced = %zu\n",
590 we->we_bufcount, we->we_reclaimable_bytes,
591 we->we_error, we->we_unsynced_bufbytes));
592 }
593 #else /* !WAPBL_DEBUG_BUFBYTES */
594 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
595 ("wapbl_discard: thread %d.%d discarding transaction\n"
596 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
597 "deallocs=%d inodes=%d\n"
598 "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
599 pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
600 wl->wl_bcount, wl->wl_dealloccnt,
601 wl->wl_inohashcnt, wl->wl_error_count,
602 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
603 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
604 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
605 ("\tentry: bufcount = %zu, reclaimable = %zu, "
606 "error = %d\n",
607 we->we_bufcount, we->we_reclaimable_bytes,
608 we->we_error));
609 }
610 #endif /* !WAPBL_DEBUG_BUFBYTES */
611 }
612 #endif /* WAPBL_DEBUG_PRINT */
613
614 for (i = 0; i <= wl->wl_inohashmask; i++) {
615 struct wapbl_ino_head *wih;
616 struct wapbl_ino *wi;
617
618 wih = &wl->wl_inohash[i];
619 while ((wi = LIST_FIRST(wih)) != NULL) {
620 LIST_REMOVE(wi, wi_hash);
621 pool_put(&wapbl_ino_pool, wi);
622 KASSERT(wl->wl_inohashcnt > 0);
623 wl->wl_inohashcnt--;
624 }
625 }
626
627 /*
628 * clean buffer list
629 */
630 mutex_enter(&bufcache_lock);
631 mutex_enter(&wl->wl_mtx);
632 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
633 if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
634 /*
635 * The buffer will be unlocked and
636 * removed from the transaction in brelse
637 */
638 mutex_exit(&wl->wl_mtx);
639 brelsel(bp, 0);
640 mutex_enter(&wl->wl_mtx);
641 }
642 }
643 mutex_exit(&wl->wl_mtx);
644 mutex_exit(&bufcache_lock);
645
646 /*
647 * Remove references to this wl from wl_entries; free any which
648 * no longer have buffers. Others will be freed in wapbl_biodone()
649 * when they no longer have any buffers.
650 */
651 while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
652 SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
653 /* XXX should we be accumulating wl_error_count
654 * and increasing reclaimable bytes ? */
655 we->we_wapbl = NULL;
656 if (we->we_bufcount == 0) {
657 #ifdef WAPBL_DEBUG_BUFBYTES
658 KASSERT(we->we_unsynced_bufbytes == 0);
659 #endif
660 pool_put(&wapbl_entry_pool, we);
661 }
662 }
663
664 /* Discard list of deallocs */
665 wl->wl_dealloccnt = 0;
666 /* XXX should we clear wl_reserved_bytes? */
667
668 KASSERT(wl->wl_bufbytes == 0);
669 KASSERT(wl->wl_bcount == 0);
670 KASSERT(wl->wl_bufcount == 0);
671 KASSERT(LIST_EMPTY(&wl->wl_bufs));
672 KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
673 KASSERT(wl->wl_inohashcnt == 0);
674
675 rw_exit(&wl->wl_rwlock);
676 }
677
678 int
679 wapbl_stop(struct wapbl *wl, int force)
680 {
681 int error;
682
683 WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
684 error = wapbl_flush(wl, 1);
685 if (error) {
686 if (force)
687 wapbl_discard(wl);
688 else
689 return error;
690 }
691
692 /* Unlinked inodes persist after a flush */
693 if (wl->wl_inohashcnt) {
694 if (force) {
695 wapbl_discard(wl);
696 } else {
697 return EBUSY;
698 }
699 }
700
701 KASSERT(wl->wl_bufbytes == 0);
702 KASSERT(wl->wl_bcount == 0);
703 KASSERT(wl->wl_bufcount == 0);
704 KASSERT(LIST_EMPTY(&wl->wl_bufs));
705 KASSERT(wl->wl_dealloccnt == 0);
706 KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
707 KASSERT(wl->wl_inohashcnt == 0);
708
709 wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
710 wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
711 wapbl_free(wl->wl_deallocblks,
712 sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim);
713 wapbl_free(wl->wl_dealloclens,
714 sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim);
715 wapbl_free(wl->wl_buffer, MAXPHYS);
716 wapbl_inodetrk_free(wl);
717
718 cv_destroy(&wl->wl_reclaimable_cv);
719 mutex_destroy(&wl->wl_mtx);
720 rw_destroy(&wl->wl_rwlock);
721 wapbl_free(wl, sizeof(*wl));
722
723 return 0;
724 }
725
726 static int
727 wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
728 {
729 struct pstats *pstats = curlwp->l_proc->p_stats;
730 struct buf *bp;
731 int error;
732
733 KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
734 KASSERT(devvp->v_type == VBLK);
735
736 if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
737 mutex_enter(devvp->v_interlock);
738 devvp->v_numoutput++;
739 mutex_exit(devvp->v_interlock);
740 pstats->p_ru.ru_oublock++;
741 } else {
742 pstats->p_ru.ru_inblock++;
743 }
744
745 bp = getiobuf(devvp, true);
746 bp->b_flags = flags;
747 bp->b_cflags = BC_BUSY; /* silly & dubious */
748 bp->b_dev = devvp->v_rdev;
749 bp->b_data = data;
750 bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
751 bp->b_blkno = pbn;
752 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
753
754 WAPBL_PRINTF(WAPBL_PRINT_IO,
755 ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%"PRIx64"\n",
756 BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
757 bp->b_blkno, bp->b_dev));
758
759 VOP_STRATEGY(devvp, bp);
760
761 error = biowait(bp);
762 putiobuf(bp);
763
764 if (error) {
765 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
766 ("wapbl_doio: %s %zu bytes at block %" PRId64
767 " on dev 0x%"PRIx64" failed with error %d\n",
768 (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
769 "write" : "read"),
770 len, pbn, devvp->v_rdev, error));
771 }
772
773 return error;
774 }
775
776 int
777 wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
778 {
779
780 return wapbl_doio(data, len, devvp, pbn, B_WRITE);
781 }
782
783 int
784 wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
785 {
786
787 return wapbl_doio(data, len, devvp, pbn, B_READ);
788 }
789
790 /*
791 * Flush buffered data if any.
792 */
793 static int
794 wapbl_buffered_flush(struct wapbl *wl)
795 {
796 int error;
797
798 if (wl->wl_buffer_used == 0)
799 return 0;
800
801 error = wapbl_doio(wl->wl_buffer, wl->wl_buffer_used,
802 wl->wl_devvp, wl->wl_buffer_dblk, B_WRITE);
803 wl->wl_buffer_used = 0;
804
805 return error;
806 }
807
808 /*
809 * Write data to the log.
810 * Try to coalesce writes and emit MAXPHYS aligned blocks.
811 */
812 static int
813 wapbl_buffered_write(void *data, size_t len, struct wapbl *wl, daddr_t pbn)
814 {
815 int error;
816 size_t resid;
817
818 /*
819 * If not adjacent to the buffered data, flush first. The disk block
820 * address is always valid for a non-empty buffer.
821 */
822 if (wl->wl_buffer_used > 0 &&
823 pbn != wl->wl_buffer_dblk + btodb(wl->wl_buffer_used)) {
824 error = wapbl_buffered_flush(wl);
825 if (error)
826 return error;
827 }
828 /*
829 * If this write goes to an empty buffer we have to
830 * save the disk block address first.
831 */
832 if (wl->wl_buffer_used == 0)
833 wl->wl_buffer_dblk = pbn;
834 /*
835 * Remaining space so this buffer ends on a MAXPHYS boundary.
836 *
837 * Cannot become less than or equal to zero, since the buffer would
838 * have been flushed by the previous call in that case.
839 */
840 resid = MAXPHYS - dbtob(wl->wl_buffer_dblk % btodb(MAXPHYS)) -
841 wl->wl_buffer_used;
842 KASSERT(resid > 0);
843 KASSERT(dbtob(btodb(resid)) == resid);
844 if (len >= resid) {
845 memcpy(wl->wl_buffer + wl->wl_buffer_used, data, resid);
846 wl->wl_buffer_used += resid;
847 error = wapbl_doio(wl->wl_buffer, wl->wl_buffer_used,
848 wl->wl_devvp, wl->wl_buffer_dblk, B_WRITE);
849 data = (uint8_t *)data + resid;
850 len -= resid;
851 wl->wl_buffer_dblk = pbn + btodb(resid);
852 wl->wl_buffer_used = 0;
853 if (error)
854 return error;
855 }
856 KASSERT(len < MAXPHYS);
857 if (len > 0) {
858 memcpy(wl->wl_buffer + wl->wl_buffer_used, data, len);
859 wl->wl_buffer_used += len;
860 }
861
862 return 0;
863 }
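
/*
 * Example of the coalescing behaviour (a sketch; assumes MAXPHYS is 64 KiB
 * and DEV_BSIZE is 512, which is common but not universal): starting with an
 * empty buffer whose wl_buffer_dblk falls on a MAXPHYS boundary, four
 * consecutive 16 KiB calls at pbn, pbn+32, pbn+64 and pbn+96 are only
 * memcpy'd into wl_buffer; the fourth fills the 64 KiB window (len == resid)
 * and triggers a single 64 KiB wapbl_doio() write.  A call with a
 * non-adjacent pbn flushes whatever is buffered before starting over.
 */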
864
865 /*
866 * Off is a byte offset; returns the new offset for the next write.
867 * Handles log wraparound.
868 */
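/*
 * Illustrative wraparound case (hypothetical numbers): with
 * wl_circ_off = 1024 and wl_circ_size = 16384 the writable window is
 * [1024, 17408).  A 2048 byte write at off = 16384 only has
 * slen = 1024 bytes of room before the end, so the first 1024 bytes go
 * out at off and the remaining 1024 bytes continue at wl_circ_off;
 * *offp comes back as 2048.
 */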
869 static int
870 wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
871 {
872 size_t slen;
873 off_t off = *offp;
874 int error;
875 daddr_t pbn;
876
877 KDASSERT(((len >> wl->wl_log_dev_bshift) <<
878 wl->wl_log_dev_bshift) == len);
879
880 if (off < wl->wl_circ_off)
881 off = wl->wl_circ_off;
882 slen = wl->wl_circ_off + wl->wl_circ_size - off;
883 if (slen < len) {
884 pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
885 #ifdef _KERNEL
886 pbn = btodb(pbn << wl->wl_log_dev_bshift);
887 #endif
888 error = wapbl_buffered_write(data, slen, wl, pbn);
889 if (error)
890 return error;
891 data = (uint8_t *)data + slen;
892 len -= slen;
893 off = wl->wl_circ_off;
894 }
895 pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
896 #ifdef _KERNEL
897 pbn = btodb(pbn << wl->wl_log_dev_bshift);
898 #endif
899 error = wapbl_buffered_write(data, len, wl, pbn);
900 if (error)
901 return error;
902 off += len;
903 if (off >= wl->wl_circ_off + wl->wl_circ_size)
904 off = wl->wl_circ_off;
905 *offp = off;
906 return 0;
907 }
908
909 /****************************************************************/
910
911 int
912 wapbl_begin(struct wapbl *wl, const char *file, int line)
913 {
914 int doflush;
915 unsigned lockcount;
916
917 KDASSERT(wl);
918
919 /*
920 * XXX this needs to be made much more sophisticated.
921 * perhaps each wapbl_begin could reserve a specified
922 * number of buffers and bytes.
923 */
924 mutex_enter(&wl->wl_mtx);
925 lockcount = wl->wl_lock_count;
926 doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
927 wl->wl_bufbytes_max / 2) ||
928 ((wl->wl_bufcount + (lockcount * 10)) >
929 wl->wl_bufcount_max / 2) ||
930 (wapbl_transaction_len(wl) > wl->wl_circ_size / 2) ||
931 (wl->wl_dealloccnt >= (wl->wl_dealloclim / 2));
932 mutex_exit(&wl->wl_mtx);
933
934 if (doflush) {
935 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
936 ("force flush lockcnt=%d bufbytes=%zu "
937 "(max=%zu) bufcount=%zu (max=%zu) "
938 "dealloccnt %d (lim=%d)\n",
939 lockcount, wl->wl_bufbytes,
940 wl->wl_bufbytes_max, wl->wl_bufcount,
941 wl->wl_bufcount_max,
942 wl->wl_dealloccnt, wl->wl_dealloclim));
943 }
944
945 if (doflush) {
946 int error = wapbl_flush(wl, 0);
947 if (error)
948 return error;
949 }
950
951 rw_enter(&wl->wl_rwlock, RW_READER);
952 mutex_enter(&wl->wl_mtx);
953 wl->wl_lock_count++;
954 mutex_exit(&wl->wl_mtx);
955
956 #if defined(WAPBL_DEBUG_PRINT)
957 WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
958 ("wapbl_begin thread %d.%d with bufcount=%zu "
959 "bufbytes=%zu bcount=%zu at %s:%d\n",
960 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
961 wl->wl_bufbytes, wl->wl_bcount, file, line));
962 #endif
963
964 return 0;
965 }
966
967 void
968 wapbl_end(struct wapbl *wl)
969 {
970
971 #if defined(WAPBL_DEBUG_PRINT)
972 WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
973 ("wapbl_end thread %d.%d with bufcount=%zu "
974 "bufbytes=%zu bcount=%zu\n",
975 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
976 wl->wl_bufbytes, wl->wl_bcount));
977 #endif
978
979 /*
980 * XXX this could be handled more gracefully, perhaps place
981 * only a partial transaction in the log and allow the
982 * remaining to flush without the protection of the journal.
983 */
984 KASSERTMSG((wapbl_transaction_len(wl) <=
985 (wl->wl_circ_size - wl->wl_reserved_bytes)),
986 "wapbl_end: current transaction too big to flush");
987
988 mutex_enter(&wl->wl_mtx);
989 KASSERT(wl->wl_lock_count > 0);
990 wl->wl_lock_count--;
991 mutex_exit(&wl->wl_mtx);
992
993 rw_exit(&wl->wl_rwlock);
994 }
995
996 void
997 wapbl_add_buf(struct wapbl *wl, struct buf * bp)
998 {
999
1000 KASSERT(bp->b_cflags & BC_BUSY);
1001 KASSERT(bp->b_vp);
1002
1003 wapbl_jlock_assert(wl);
1004
1005 #if 0
1006 /*
1007 * XXX this might be an issue for swapfiles.
1008 * see uvm_swap.c:1702
1009 *
1010 * XXX2 why require it then? leap of semantics?
1011 */
1012 KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
1013 #endif
1014
1015 mutex_enter(&wl->wl_mtx);
1016 if (bp->b_flags & B_LOCKED) {
1017 LIST_REMOVE(bp, b_wapbllist);
1018 WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
1019 ("wapbl_add_buf thread %d.%d re-adding buf %p "
1020 "with %d bytes %d bcount\n",
1021 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
1022 bp->b_bcount));
1023 } else {
1024 /* unlocked but dirty buffers shouldn't exist */
1025 KASSERT(!(bp->b_oflags & BO_DELWRI));
1026 wl->wl_bufbytes += bp->b_bufsize;
1027 wl->wl_bcount += bp->b_bcount;
1028 wl->wl_bufcount++;
1029 WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
1030 ("wapbl_add_buf thread %d.%d adding buf %p "
1031 "with %d bytes %d bcount\n",
1032 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
1033 bp->b_bcount));
1034 }
1035 LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
1036 mutex_exit(&wl->wl_mtx);
1037
1038 bp->b_flags |= B_LOCKED;
1039 }
1040
1041 static void
1042 wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
1043 {
1044
1045 KASSERT(mutex_owned(&wl->wl_mtx));
1046 KASSERT(bp->b_cflags & BC_BUSY);
1047 wapbl_jlock_assert(wl);
1048
1049 #if 0
1050 /*
1051 * XXX this might be an issue for swapfiles.
1052 * see uvm_swap.c:1725
1053 *
1054 * XXXdeux: see above
1055 */
1056 KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
1057 #endif
1058 KASSERT(bp->b_flags & B_LOCKED);
1059
1060 WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
1061 ("wapbl_remove_buf thread %d.%d removing buf %p with "
1062 "%d bytes %d bcount\n",
1063 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));
1064
1065 KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
1066 wl->wl_bufbytes -= bp->b_bufsize;
1067 KASSERT(wl->wl_bcount >= bp->b_bcount);
1068 wl->wl_bcount -= bp->b_bcount;
1069 KASSERT(wl->wl_bufcount > 0);
1070 wl->wl_bufcount--;
1071 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1072 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1073 LIST_REMOVE(bp, b_wapbllist);
1074
1075 bp->b_flags &= ~B_LOCKED;
1076 }
1077
1078 /* called from brelsel() in vfs_bio among other places */
1079 void
1080 wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
1081 {
1082
1083 mutex_enter(&wl->wl_mtx);
1084 wapbl_remove_buf_locked(wl, bp);
1085 mutex_exit(&wl->wl_mtx);
1086 }
1087
1088 void
1089 wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
1090 {
1091
1092 KASSERT(bp->b_cflags & BC_BUSY);
1093
1094 /*
1095 * XXX: why does this depend on B_LOCKED? otherwise the buf
1096 * is not for a transaction? if so, why is this called in the
1097 * first place?
1098 */
1099 if (bp->b_flags & B_LOCKED) {
1100 mutex_enter(&wl->wl_mtx);
1101 wl->wl_bufbytes += bp->b_bufsize - oldsz;
1102 wl->wl_bcount += bp->b_bcount - oldcnt;
1103 mutex_exit(&wl->wl_mtx);
1104 }
1105 }
1106
1107 #endif /* _KERNEL */
1108
1109 /****************************************************************/
1110 /* Some utility inlines */
1111
1112 static inline size_t
1113 wapbl_space_used(size_t avail, off_t head, off_t tail)
1114 {
1115
1116 if (tail == 0) {
1117 KASSERT(head == 0);
1118 return 0;
1119 }
1120 return ((head + (avail - 1) - tail) % avail) + 1;
1121 }
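
/*
 * Worked example (hypothetical values): with avail = 16384, head = 12288
 * and tail = 4096 the expression yields
 * ((12288 + 16383 - 4096) % 16384) + 1 = 8192 bytes in use; with the
 * pointers wrapped, e.g. head = 2048 and tail = 12288, it yields
 * ((2048 + 16383 - 12288) % 16384) + 1 = 6144.  The full log
 * (head == tail != 0) correctly reports avail bytes used rather than 0.
 */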
1122
1123 #ifdef _KERNEL
1124 /* Advance the pointer oldoff by delta, wrapping within [off, off + size) */
1125 static inline off_t
1126 wapbl_advance(size_t size, size_t off, off_t oldoff, size_t delta)
1127 {
1128 off_t newoff;
1129
1130 /* Define acceptable ranges for inputs. */
1131 KASSERT(delta <= (size_t)size);
1132 KASSERT((oldoff == 0) || ((size_t)oldoff >= off));
1133 KASSERT(oldoff < (off_t)(size + off));
1134
1135 if ((oldoff == 0) && (delta != 0))
1136 newoff = off + delta;
1137 else if ((oldoff + delta) < (size + off))
1138 newoff = oldoff + delta;
1139 else
1140 newoff = (oldoff + delta) - size;
1141
1142 /* Note some interesting axioms */
1143 KASSERT((delta != 0) || (newoff == oldoff));
1144 KASSERT((delta == 0) || (newoff != 0));
1145 KASSERT((delta != (size)) || (newoff == oldoff));
1146
1147 /* Define acceptable ranges for output. */
1148 KASSERT((newoff == 0) || ((size_t)newoff >= off));
1149 KASSERT((size_t)newoff < (size + off));
1150 return newoff;
1151 }
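
/*
 * Example (hypothetical values): with size = 16384 and off = 1024 the
 * valid region is [1024, 17408).  Advancing oldoff = 17000 by
 * delta = 1000 runs past the end, so newoff = 17000 + 1000 - 16384 = 1616.
 * Advancing the "empty" marker oldoff = 0 by a nonzero delta yields
 * off + delta, i.e. the pointer re-enters the circular region just past
 * its start.
 */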
1152
1153 static inline size_t
1154 wapbl_space_free(size_t avail, off_t head, off_t tail)
1155 {
1156
1157 return avail - wapbl_space_used(avail, head, tail);
1158 }
1159
1160 static inline void
1161 wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
1162 off_t *tailp)
1163 {
1164 off_t head = *headp;
1165 off_t tail = *tailp;
1166
1167 KASSERT(delta <= wapbl_space_free(size, head, tail));
1168 head = wapbl_advance(size, off, head, delta);
1169 if ((tail == 0) && (head != 0))
1170 tail = off;
1171 *headp = head;
1172 *tailp = tail;
1173 }
1174
1175 static inline void
1176 wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
1177 off_t *tailp)
1178 {
1179 off_t head = *headp;
1180 off_t tail = *tailp;
1181
1182 KASSERT(delta <= wapbl_space_used(size, head, tail));
1183 tail = wapbl_advance(size, off, tail, delta);
1184 if (head == tail) {
1185 head = tail = 0;
1186 }
1187 *headp = head;
1188 *tailp = tail;
1189 }
1190
1191
1192 /****************************************************************/
1193
1194 /*
1195 * Remove transactions whose buffers are completely flushed to disk.
1196 * Will block until at least minfree space is available.
1197 * Only intended to be called from inside wapbl_flush and therefore
1198 * does not protect against commit races with itself or with flush.
1199 */
1200 static int
1201 wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
1202 {
1203 size_t delta;
1204 size_t avail;
1205 off_t head;
1206 off_t tail;
1207 int error = 0;
1208
1209 KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
1210 KASSERT(rw_write_held(&wl->wl_rwlock));
1211
1212 mutex_enter(&wl->wl_mtx);
1213
1214 /*
1215 * First check to see if we have to do a commit
1216 * at all.
1217 */
1218 avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
1219 if (minfree < avail) {
1220 mutex_exit(&wl->wl_mtx);
1221 return 0;
1222 }
1223 minfree -= avail;
1224 while ((wl->wl_error_count == 0) &&
1225 (wl->wl_reclaimable_bytes < minfree)) {
1226 WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1227 ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
1228 "minfree=%zd\n",
1229 &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
1230 minfree));
1231
1232 cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
1233 }
1234 if (wl->wl_reclaimable_bytes < minfree) {
1235 KASSERT(wl->wl_error_count);
1236 /* XXX maybe get actual error from buffer instead someday? */
1237 error = EIO;
1238 }
1239 head = wl->wl_head;
1240 tail = wl->wl_tail;
1241 delta = wl->wl_reclaimable_bytes;
1242
1243 /* If all of the entries are flushed, then be sure to keep
1244 * the reserved bytes reserved. Watch out for discarded transactions,
1245 * which could leave more bytes reserved than are reclaimable.
1246 */
1247 if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
1248 (delta >= wl->wl_reserved_bytes)) {
1249 delta -= wl->wl_reserved_bytes;
1250 }
1251 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
1252 &tail);
1253 KDASSERT(wl->wl_reserved_bytes <=
1254 wapbl_space_used(wl->wl_circ_size, head, tail));
1255 mutex_exit(&wl->wl_mtx);
1256
1257 if (error)
1258 return error;
1259
1260 if (waitonly)
1261 return 0;
1262
1263 /*
1264 * This is where head, tail and delta are unprotected from races
1265 * against other invocations of this routine or against flush.
1266 * This is ok since we only call this routine from inside flush itself.
1267 *
1268 * XXX: how can it race against itself when accessed only
1269 * from behind the write-locked rwlock?
1270 */
1271 error = wapbl_write_commit(wl, head, tail);
1272 if (error)
1273 return error;
1274
1275 wl->wl_head = head;
1276 wl->wl_tail = tail;
1277
1278 mutex_enter(&wl->wl_mtx);
1279 KASSERT(wl->wl_reclaimable_bytes >= delta);
1280 wl->wl_reclaimable_bytes -= delta;
1281 mutex_exit(&wl->wl_mtx);
1282 WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1283 ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
1284 curproc->p_pid, curlwp->l_lid, delta));
1285
1286 return 0;
1287 }
1288
1289 /****************************************************************/
1290
1291 void
1292 wapbl_biodone(struct buf *bp)
1293 {
1294 struct wapbl_entry *we = bp->b_private;
1295 struct wapbl *wl = we->we_wapbl;
1296 #ifdef WAPBL_DEBUG_BUFBYTES
1297 const int bufsize = bp->b_bufsize;
1298 #endif
1299
1300 /*
1301 * Handle possible flushing of buffers after the log has been
1302 * decommissioned.
1303 */
1304 if (!wl) {
1305 KASSERT(we->we_bufcount > 0);
1306 we->we_bufcount--;
1307 #ifdef WAPBL_DEBUG_BUFBYTES
1308 KASSERT(we->we_unsynced_bufbytes >= bufsize);
1309 we->we_unsynced_bufbytes -= bufsize;
1310 #endif
1311
1312 if (we->we_bufcount == 0) {
1313 #ifdef WAPBL_DEBUG_BUFBYTES
1314 KASSERT(we->we_unsynced_bufbytes == 0);
1315 #endif
1316 pool_put(&wapbl_entry_pool, we);
1317 }
1318
1319 brelse(bp, 0);
1320 return;
1321 }
1322
1323 #ifdef ohbother
1324 KDASSERT(bp->b_oflags & BO_DONE);
1325 KDASSERT(!(bp->b_oflags & BO_DELWRI));
1326 KDASSERT(bp->b_flags & B_ASYNC);
1327 KDASSERT(bp->b_cflags & BC_BUSY);
1328 KDASSERT(!(bp->b_flags & B_LOCKED));
1329 KDASSERT(!(bp->b_flags & B_READ));
1330 KDASSERT(!(bp->b_cflags & BC_INVAL));
1331 KDASSERT(!(bp->b_cflags & BC_NOCACHE));
1332 #endif
1333
1334 if (bp->b_error) {
1335 #ifdef notyet /* Can't currently handle possible dirty buffer reuse */
1336 /*
1337 * XXXpooka: interfaces not fully updated
1338 * Note: this was not enabled in the original patch
1339 * against netbsd4 either. I don't know if comment
1340 * above is true or not.
1341 */
1342
1343 /*
1344 * If an error occurs, report the error and leave the
1345 * buffer as a delayed write on the LRU queue.
1346 * restarting the write would likely result in
1347 * an error spinloop, so let it be done harmlessly
1348 * by the syncer.
1349 */
1350 bp->b_flags &= ~(B_DONE);
1351 simple_unlock(&bp->b_interlock);
1352
1353 if (we->we_error == 0) {
1354 mutex_enter(&wl->wl_mtx);
1355 wl->wl_error_count++;
1356 mutex_exit(&wl->wl_mtx);
1357 cv_broadcast(&wl->wl_reclaimable_cv);
1358 }
1359 we->we_error = bp->b_error;
1360 bp->b_error = 0;
1361 brelse(bp);
1362 return;
1363 #else
1364 /* For now, just mark the log permanently errored out */
1365
1366 mutex_enter(&wl->wl_mtx);
1367 if (wl->wl_error_count == 0) {
1368 wl->wl_error_count++;
1369 cv_broadcast(&wl->wl_reclaimable_cv);
1370 }
1371 mutex_exit(&wl->wl_mtx);
1372 #endif
1373 }
1374
1375 /*
1376 * Release the buffer here. wapbl_flush() may wait for the
1377 * log to become empty and we better unbusy the buffer before
1378 * wapbl_flush() returns.
1379 */
1380 brelse(bp, 0);
1381
1382 mutex_enter(&wl->wl_mtx);
1383
1384 KASSERT(we->we_bufcount > 0);
1385 we->we_bufcount--;
1386 #ifdef WAPBL_DEBUG_BUFBYTES
1387 KASSERT(we->we_unsynced_bufbytes >= bufsize);
1388 we->we_unsynced_bufbytes -= bufsize;
1389 KASSERT(wl->wl_unsynced_bufbytes >= bufsize);
1390 wl->wl_unsynced_bufbytes -= bufsize;
1391 #endif
1392
1393 /*
1394 * If the current transaction can be reclaimed, start
1395 * at the beginning and reclaim any consecutive reclaimable
1396 * transactions. If we successfully reclaim anything,
1397 * then wakeup anyone waiting for the reclaim.
1398 */
1399 if (we->we_bufcount == 0) {
1400 size_t delta = 0;
1401 int errcnt = 0;
1402 #ifdef WAPBL_DEBUG_BUFBYTES
1403 KDASSERT(we->we_unsynced_bufbytes == 0);
1404 #endif
1405 /*
1406 * clear any posted error, since the buffer it came from
1407 * has been successfully flushed by now
1408 */
1409 while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
1410 (we->we_bufcount == 0)) {
1411 delta += we->we_reclaimable_bytes;
1412 if (we->we_error)
1413 errcnt++;
1414 SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
1415 pool_put(&wapbl_entry_pool, we);
1416 }
1417
1418 if (delta) {
1419 wl->wl_reclaimable_bytes += delta;
1420 KASSERT(wl->wl_error_count >= errcnt);
1421 wl->wl_error_count -= errcnt;
1422 cv_broadcast(&wl->wl_reclaimable_cv);
1423 }
1424 }
1425
1426 mutex_exit(&wl->wl_mtx);
1427 }
1428
1429 /*
1430 * Write transactions to disk + start I/O for contents
1431 */
1432 int
1433 wapbl_flush(struct wapbl *wl, int waitfor)
1434 {
1435 struct buf *bp;
1436 struct wapbl_entry *we;
1437 off_t off;
1438 off_t head;
1439 off_t tail;
1440 size_t delta = 0;
1441 size_t flushsize;
1442 size_t reserved;
1443 int error = 0;
1444
1445 /*
1446 * Do a quick check to see if a full flush can be skipped.
1447 * This assumes that the flush callback does not need to be called
1448 * unless there are other outstanding bufs.
1449 */
1450 if (!waitfor) {
1451 size_t nbufs;
1452 mutex_enter(&wl->wl_mtx); /* XXX need mutex here to
1453 protect the KASSERTS */
1454 nbufs = wl->wl_bufcount;
1455 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1456 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1457 mutex_exit(&wl->wl_mtx);
1458 if (nbufs == 0)
1459 return 0;
1460 }
1461
1462 /*
1463 * XXX we may consider using LK_UPGRADE here
1464 * if we want to call flush from inside a transaction
1465 */
1466 rw_enter(&wl->wl_rwlock, RW_WRITER);
1467 wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
1468 wl->wl_dealloccnt);
1469
1470 /*
1471 * Now that we are fully locked and flushed,
1472 * do another check for nothing to do.
1473 */
1474 if (wl->wl_bufcount == 0) {
1475 goto out;
1476 }
1477
1478 #if 0
1479 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1480 ("wapbl_flush thread %d.%d flushing entries with "
1481 "bufcount=%zu bufbytes=%zu\n",
1482 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1483 wl->wl_bufbytes));
1484 #endif
1485
1486 /* Calculate amount of space needed to flush */
1487 flushsize = wapbl_transaction_len(wl);
1488 if (wapbl_verbose_commit) {
1489 struct timespec ts;
1490 getnanotime(&ts);
1491 printf("%s: %lld.%09ld this transaction = %zu bytes\n",
1492 __func__, (long long)ts.tv_sec,
1493 (long)ts.tv_nsec, flushsize);
1494 }
1495
1496 if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1497 /*
1498 * XXX this could be handled more gracefully, perhaps place
1499 * only a partial transaction in the log and allow the
1500 * remaining to flush without the protection of the journal.
1501 */
1502 panic("wapbl_flush: current transaction too big to flush");
1503 }
1504
1505 error = wapbl_truncate(wl, flushsize, 0);
1506 if (error)
1507 goto out2;
1508
1509 off = wl->wl_head;
1510 KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
1511 (off < wl->wl_circ_off + wl->wl_circ_size)));
1512 error = wapbl_write_blocks(wl, &off);
1513 if (error)
1514 goto out2;
1515 error = wapbl_write_revocations(wl, &off);
1516 if (error)
1517 goto out2;
1518 error = wapbl_write_inodes(wl, &off);
1519 if (error)
1520 goto out2;
1521
1522 reserved = 0;
1523 if (wl->wl_inohashcnt)
1524 reserved = wapbl_transaction_inodes_len(wl);
1525
1526 head = wl->wl_head;
1527 tail = wl->wl_tail;
1528
1529 wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1530 &head, &tail);
1531 #ifdef WAPBL_DEBUG
1532 if (head != off) {
1533 panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1534 " off=%"PRIdMAX" flush=%zu",
1535 (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1536 flushsize);
1537 }
1538 #else
1539 KASSERT(head == off);
1540 #endif
1541
1542 /* Opportunistically move the tail forward if we can */
1543 if (!wapbl_lazy_truncate) {
1544 mutex_enter(&wl->wl_mtx);
1545 delta = wl->wl_reclaimable_bytes;
1546 mutex_exit(&wl->wl_mtx);
1547 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1548 &head, &tail);
1549 }
1550
1551 error = wapbl_write_commit(wl, head, tail);
1552 if (error)
1553 goto out2;
1554
1555 we = pool_get(&wapbl_entry_pool, PR_WAITOK);
1556
1557 #ifdef WAPBL_DEBUG_BUFBYTES
1558 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1559 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1560 " unsynced=%zu"
1561 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1562 "inodes=%d\n",
1563 curproc->p_pid, curlwp->l_lid, flushsize, delta,
1564 wapbl_space_used(wl->wl_circ_size, head, tail),
1565 wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1566 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1567 wl->wl_inohashcnt));
1568 #else
1569 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1570 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1571 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1572 "inodes=%d\n",
1573 curproc->p_pid, curlwp->l_lid, flushsize, delta,
1574 wapbl_space_used(wl->wl_circ_size, head, tail),
1575 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1576 wl->wl_dealloccnt, wl->wl_inohashcnt));
1577 #endif
1578
1579
1580 mutex_enter(&bufcache_lock);
1581 mutex_enter(&wl->wl_mtx);
1582
1583 wl->wl_reserved_bytes = reserved;
1584 wl->wl_head = head;
1585 wl->wl_tail = tail;
1586 KASSERT(wl->wl_reclaimable_bytes >= delta);
1587 wl->wl_reclaimable_bytes -= delta;
1588 wl->wl_dealloccnt = 0;
1589 #ifdef WAPBL_DEBUG_BUFBYTES
1590 wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1591 #endif
1592
1593 we->we_wapbl = wl;
1594 we->we_bufcount = wl->wl_bufcount;
1595 #ifdef WAPBL_DEBUG_BUFBYTES
1596 we->we_unsynced_bufbytes = wl->wl_bufbytes;
1597 #endif
1598 we->we_reclaimable_bytes = flushsize;
1599 we->we_error = 0;
1600 SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1601
1602 /*
1603 * This flushes bufs in the reverse order from that in which they were
1604 * queued. It shouldn't matter, but if we care we could use a TAILQ instead.
1605 * XXX Note they will get put on the lru queue when they flush
1606 * so we might actually want to change this to preserve order.
1607 */
1608 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
1609 if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1610 continue;
1611 }
1612 bp->b_iodone = wapbl_biodone;
1613 bp->b_private = we;
1614 bremfree(bp);
1615 wapbl_remove_buf_locked(wl, bp);
1616 mutex_exit(&wl->wl_mtx);
1617 mutex_exit(&bufcache_lock);
1618 bawrite(bp);
1619 mutex_enter(&bufcache_lock);
1620 mutex_enter(&wl->wl_mtx);
1621 }
1622 mutex_exit(&wl->wl_mtx);
1623 mutex_exit(&bufcache_lock);
1624
1625 #if 0
1626 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1627 ("wapbl_flush thread %d.%d done flushing entries...\n",
1628 curproc->p_pid, curlwp->l_lid));
1629 #endif
1630
1631 out:
1632
1633 /*
1634 * If the waitfor flag is set, don't return until everything is
1635 * fully flushed and the on disk log is empty.
1636 */
1637 if (waitfor) {
1638 error = wapbl_truncate(wl, wl->wl_circ_size -
1639 wl->wl_reserved_bytes, wapbl_lazy_truncate);
1640 }
1641
1642 out2:
1643 if (error) {
1644 wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
1645 wl->wl_dealloclens, wl->wl_dealloccnt);
1646 }
1647
1648 #ifdef WAPBL_DEBUG_PRINT
1649 if (error) {
1650 pid_t pid = -1;
1651 lwpid_t lid = -1;
1652 if (curproc)
1653 pid = curproc->p_pid;
1654 if (curlwp)
1655 lid = curlwp->l_lid;
1656 mutex_enter(&wl->wl_mtx);
1657 #ifdef WAPBL_DEBUG_BUFBYTES
1658 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1659 ("wapbl_flush: thread %d.%d aborted flush: "
1660 "error = %d\n"
1661 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1662 "deallocs=%d inodes=%d\n"
1663 "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1664 "unsynced=%zu\n",
1665 pid, lid, error, wl->wl_bufcount,
1666 wl->wl_bufbytes, wl->wl_bcount,
1667 wl->wl_dealloccnt, wl->wl_inohashcnt,
1668 wl->wl_error_count, wl->wl_reclaimable_bytes,
1669 wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1670 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1671 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1672 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1673 "error = %d, unsynced = %zu\n",
1674 we->we_bufcount, we->we_reclaimable_bytes,
1675 we->we_error, we->we_unsynced_bufbytes));
1676 }
1677 #else
1678 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1679 ("wapbl_flush: thread %d.%d aborted flush: "
1680 "error = %d\n"
1681 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1682 "deallocs=%d inodes=%d\n"
1683 "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
1684 pid, lid, error, wl->wl_bufcount,
1685 wl->wl_bufbytes, wl->wl_bcount,
1686 wl->wl_dealloccnt, wl->wl_inohashcnt,
1687 wl->wl_error_count, wl->wl_reclaimable_bytes,
1688 wl->wl_reserved_bytes));
1689 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1690 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1691 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1692 "error = %d\n", we->we_bufcount,
1693 we->we_reclaimable_bytes, we->we_error));
1694 }
1695 #endif
1696 mutex_exit(&wl->wl_mtx);
1697 }
1698 #endif
1699
1700 rw_exit(&wl->wl_rwlock);
1701 return error;
1702 }
1703
1704 /****************************************************************/
1705
1706 void
1707 wapbl_jlock_assert(struct wapbl *wl)
1708 {
1709
1710 KASSERT(rw_lock_held(&wl->wl_rwlock));
1711 }
1712
1713 void
1714 wapbl_junlock_assert(struct wapbl *wl)
1715 {
1716
1717 KASSERT(!rw_write_held(&wl->wl_rwlock));
1718 }
1719
1720 /****************************************************************/
1721
1722 /* locks missing */
1723 void
1724 wapbl_print(struct wapbl *wl,
1725 int full,
1726 void (*pr)(const char *, ...))
1727 {
1728 struct buf *bp;
1729 struct wapbl_entry *we;
1730 (*pr)("wapbl %p", wl);
1731 (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
1732 wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
1733 (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
1734 wl->wl_circ_size, wl->wl_circ_off,
1735 (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
1736 (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
1737 wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
1738 #ifdef WAPBL_DEBUG_BUFBYTES
1739 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1740 "reserved = %zu errcnt = %d unsynced = %zu\n",
1741 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1742 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1743 wl->wl_error_count, wl->wl_unsynced_bufbytes);
1744 #else
1745 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1746 "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
1747 wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1748 wl->wl_error_count);
1749 #endif
1750 (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
1751 wl->wl_dealloccnt, wl->wl_dealloclim);
1752 (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
1753 wl->wl_inohashcnt, wl->wl_inohashmask);
1754 (*pr)("entries:\n");
1755 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1756 #ifdef WAPBL_DEBUG_BUFBYTES
1757 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
1758 "unsynced = %zu\n",
1759 we->we_bufcount, we->we_reclaimable_bytes,
1760 we->we_error, we->we_unsynced_bufbytes);
1761 #else
1762 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
1763 we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
1764 #endif
1765 }
1766 if (full) {
1767 int cnt = 0;
1768 (*pr)("bufs =");
1769 LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
1770 if (!LIST_NEXT(bp, b_wapbllist)) {
1771 (*pr)(" %p", bp);
1772 } else if ((++cnt % 6) == 0) {
1773 (*pr)(" %p,\n\t", bp);
1774 } else {
1775 (*pr)(" %p,", bp);
1776 }
1777 }
1778 (*pr)("\n");
1779
1780 (*pr)("dealloced blks = ");
1781 {
1782 int i;
1783 cnt = 0;
1784 for (i = 0; i < wl->wl_dealloccnt; i++) {
1785 (*pr)(" %"PRId64":%d,",
1786 wl->wl_deallocblks[i],
1787 wl->wl_dealloclens[i]);
1788 if ((++cnt % 4) == 0) {
1789 (*pr)("\n\t");
1790 }
1791 }
1792 }
1793 (*pr)("\n");
1794
1795 (*pr)("registered inodes = ");
1796 {
1797 int i;
1798 cnt = 0;
1799 for (i = 0; i <= wl->wl_inohashmask; i++) {
1800 struct wapbl_ino_head *wih;
1801 struct wapbl_ino *wi;
1802
1803 wih = &wl->wl_inohash[i];
1804 LIST_FOREACH(wi, wih, wi_hash) {
1805 if (wi->wi_ino == 0)
1806 continue;
1807 (*pr)(" %"PRIu64"/0%06"PRIo32",",
1808 wi->wi_ino, wi->wi_mode);
1809 if ((++cnt % 4) == 0) {
1810 (*pr)("\n\t");
1811 }
1812 }
1813 }
1814 (*pr)("\n");
1815 }
1816 }
1817 }
1818
1819 #if defined(WAPBL_DEBUG) || defined(DDB)
1820 void
1821 wapbl_dump(struct wapbl *wl)
1822 {
1823 #if defined(WAPBL_DEBUG)
1824 if (!wl)
1825 wl = wapbl_debug_wl;
1826 #endif
1827 if (!wl)
1828 return;
1829 wapbl_print(wl, 1, printf);
1830 }
1831 #endif
1832
1833 /****************************************************************/
1834
1835 void
1836 wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
1837 {
1838
1839 wapbl_jlock_assert(wl);
1840
1841 mutex_enter(&wl->wl_mtx);
1842 /* XXX should eventually instead tie this into resource estimation */
1843 /*
1844 * XXX this panic needs locking/mutex analysis and the
1845 * ability to cope with the failure.
1846 */
1847 /* XXX this XXX doesn't have enough XXX */
1848 if (__predict_false(wl->wl_dealloccnt >= wl->wl_dealloclim))
1849 panic("wapbl_register_deallocation: out of resources");
1850
1851 wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
1852 wl->wl_dealloclens[wl->wl_dealloccnt] = len;
1853 wl->wl_dealloccnt++;
1854 WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
1855 ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
1856 mutex_exit(&wl->wl_mtx);
1857 }
1858
1859 /****************************************************************/
1860
1861 static void
1862 wapbl_inodetrk_init(struct wapbl *wl, u_int size)
1863 {
1864
1865 wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
1866 if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
1867 pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
1868 "wapblinopl", &pool_allocator_nointr, IPL_NONE);
1869 }
1870 }
1871
1872 static void
1873 wapbl_inodetrk_free(struct wapbl *wl)
1874 {
1875
1876 /* XXX this KASSERT needs locking/mutex analysis */
1877 KASSERT(wl->wl_inohashcnt == 0);
1878 hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
1879 if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
1880 pool_destroy(&wapbl_ino_pool);
1881 }
1882 }
1883
1884 static struct wapbl_ino *
1885 wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
1886 {
1887 struct wapbl_ino_head *wih;
1888 struct wapbl_ino *wi;
1889
1890 KASSERT(mutex_owned(&wl->wl_mtx));
1891
1892 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1893 LIST_FOREACH(wi, wih, wi_hash) {
1894 if (ino == wi->wi_ino)
1895 return wi;
1896 }
1897 return 0;
1898 }
1899
1900 void
1901 wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
1902 {
1903 struct wapbl_ino_head *wih;
1904 struct wapbl_ino *wi;
1905
1906 wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
1907
1908 mutex_enter(&wl->wl_mtx);
1909 if (wapbl_inodetrk_get(wl, ino) == NULL) {
1910 wi->wi_ino = ino;
1911 wi->wi_mode = mode;
1912 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1913 LIST_INSERT_HEAD(wih, wi, wi_hash);
1914 wl->wl_inohashcnt++;
1915 WAPBL_PRINTF(WAPBL_PRINT_INODE,
1916 ("wapbl_register_inode: ino=%"PRId64"\n", ino));
1917 mutex_exit(&wl->wl_mtx);
1918 } else {
1919 mutex_exit(&wl->wl_mtx);
1920 pool_put(&wapbl_ino_pool, wi);
1921 }
1922 }
1923
1924 void
1925 wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
1926 {
1927 struct wapbl_ino *wi;
1928
1929 mutex_enter(&wl->wl_mtx);
1930 wi = wapbl_inodetrk_get(wl, ino);
1931 if (wi) {
1932 WAPBL_PRINTF(WAPBL_PRINT_INODE,
1933 ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
1934 KASSERT(wl->wl_inohashcnt > 0);
1935 wl->wl_inohashcnt--;
1936 LIST_REMOVE(wi, wi_hash);
1937 mutex_exit(&wl->wl_mtx);
1938
1939 pool_put(&wapbl_ino_pool, wi);
1940 } else {
1941 mutex_exit(&wl->wl_mtx);
1942 }
1943 }
1944
1945 /****************************************************************/
1946
1947 static inline size_t
1948 wapbl_transaction_inodes_len(struct wapbl *wl)
1949 {
1950 int blocklen = 1<<wl->wl_log_dev_bshift;
1951 int iph;
1952
1953 	/* Calculate number of inodes described in an inodelist header */
1954 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
1955 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
1956
1957 KASSERT(iph > 0);
1958
1959 return MAX(1, howmany(wl->wl_inohashcnt, iph)) * blocklen;
1960 }
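/*
 * Worked example for the calculation above (hypothetical sizes): with a
 * 512-byte log block and, say, a 16-byte inodelist header and 16-byte
 * entries, iph would be (512 - 16) / 16 = 31, so 100 registered inodes
 * would need howmany(100, 31) = 4 log blocks, i.e. 2048 bytes.
 */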
1961
1962
1963 /* Calculate amount of space a transaction will take on disk */
1964 static size_t
1965 wapbl_transaction_len(struct wapbl *wl)
1966 {
1967 int blocklen = 1<<wl->wl_log_dev_bshift;
1968 size_t len;
1969 int bph;
1970
1971 /* Calculate number of blocks described in a blocklist header */
1972 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1973 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1974
1975 KASSERT(bph > 0);
1976
1977 len = wl->wl_bcount;
1978 len += howmany(wl->wl_bufcount, bph) * blocklen;
1979 len += howmany(wl->wl_dealloccnt, bph) * blocklen;
1980 len += wapbl_transaction_inodes_len(wl);
1981
1982 return len;
1983 }
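/*
 * That is: the estimate covers all dirty buffer data (wl_bcount), one
 * blocklist header per bph buffers, one revocation header per bph
 * pending deallocations, and the inode list blocks computed above.
 * The commit header itself is not part of this estimate.
 */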
1984
1985 /*
1986 * wapbl_cache_sync: issue DIOCCACHESYNC
1987 */
1988 static int
1989 wapbl_cache_sync(struct wapbl *wl, const char *msg)
1990 {
1991 const bool verbose = wapbl_verbose_commit >= 2;
1992 struct bintime start_time;
1993 int force = 1;
1994 int error;
1995
1996 if (!wapbl_flush_disk_cache) {
1997 return 0;
1998 }
1999 if (verbose) {
2000 bintime(&start_time);
2001 }
2002 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force,
2003 FWRITE, FSCRED);
2004 if (error) {
2005 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2006 ("wapbl_cache_sync: DIOCCACHESYNC on dev 0x%x "
2007 "returned %d\n", wl->wl_devvp->v_rdev, error));
2008 }
2009 if (verbose) {
2010 struct bintime d;
2011 struct timespec ts;
2012
2013 bintime(&d);
2014 bintime_sub(&d, &start_time);
2015 bintime2timespec(&d, &ts);
2016 printf("wapbl_cache_sync: %s: dev 0x%jx %ju.%09lu\n",
2017 msg, (uintmax_t)wl->wl_devvp->v_rdev,
2018 (uintmax_t)ts.tv_sec, ts.tv_nsec);
2019 }
2020 return error;
2021 }
2022
2023 /*
2024 * Perform commit operation
2025 *
2026  * Note that the generation number increment must not race with
2027  * other invocations of wapbl_write_commit.  That is already the
2028  * case, since this routine is only ever invoked from
2029  * wapbl_flush.
2030 */
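/*
 * On-disk ordering, as implemented below: (1) flush any buffered log
 * writes, (2) DIOCCACHESYNC so the log data is stable, (3) write the
 * commit header to wl_logpbn + (generation % 2), alternating between
 * the two header slots, (4) flush and DIOCCACHESYNC again so the
 * header is stable before the in-place metadata writes that follow.
 */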
2031 static int
2032 wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
2033 {
2034 struct wapbl_wc_header *wc = wl->wl_wc_header;
2035 struct timespec ts;
2036 int error;
2037 daddr_t pbn;
2038
2039 error = wapbl_buffered_flush(wl);
2040 if (error)
2041 return error;
2042 /*
2043 	 * Flush the disk cache to ensure that the blocks we've written are
2044 	 * actually on stable storage before the commit header is written.
2045 	 *
2046 	 * XXX A checksum should be calculated here; for now the cache flush has to do.
2047 */
2048 wapbl_cache_sync(wl, "1");
2049
2050 wc->wc_head = head;
2051 wc->wc_tail = tail;
2052 wc->wc_checksum = 0;
2053 wc->wc_version = 1;
2054 getnanotime(&ts);
2055 wc->wc_time = ts.tv_sec;
2056 wc->wc_timensec = ts.tv_nsec;
2057
2058 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2059 	    ("wapbl_write_commit: head = %"PRIdMAX" tail = %"PRIdMAX"\n",
2060 (intmax_t)head, (intmax_t)tail));
2061
2062 /*
2063 * write the commit header.
2064 *
2065 * XXX if generation will rollover, then first zero
2066 * over second commit header before trying to write both headers.
2067 */
2068
2069 pbn = wl->wl_logpbn + (wc->wc_generation % 2);
2070 #ifdef _KERNEL
2071 pbn = btodb(pbn << wc->wc_log_dev_bshift);
2072 #endif
2073 error = wapbl_buffered_write(wc, wc->wc_len, wl, pbn);
2074 if (error)
2075 return error;
2076 error = wapbl_buffered_flush(wl);
2077 if (error)
2078 return error;
2079
2080 /*
2081 * flush disk cache to ensure that the commit header is actually
2082 	 * written before the metadata blocks.
2083 */
2084 wapbl_cache_sync(wl, "2");
2085
2086 /*
2087 * If the generation number was zero, write it out a second time.
2088 	 * This handles initialization and generation number rollover.
2089 */
2090 if (wc->wc_generation++ == 0) {
2091 error = wapbl_write_commit(wl, head, tail);
2092 /*
2093 * This panic should be able to be removed if we do the
2094 * zero'ing mentioned above, and we are certain to roll
2095 * back generation number on failure.
2096 */
2097 if (error)
2098 panic("wapbl_write_commit: error writing duplicate "
2099 "log header: %d", error);
2100 }
2101 return 0;
2102 }
2103
2104 /* Write the locked buffers to the log; returns the new log offset via *offp */
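/*
 * Log record layout produced below: a WAPBL_WC_BLOCKS header (one log
 * block, listing up to bph buffers as daddr/len pairs) followed by the
 * raw data of those buffers, zero-padded up to a log block boundary.
 * The sequence repeats until every buffer on wl_bufs has been written.
 */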
2105 static int
2106 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
2107 {
2108 struct wapbl_wc_blocklist *wc =
2109 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2110 int blocklen = 1<<wl->wl_log_dev_bshift;
2111 int bph;
2112 struct buf *bp;
2113 off_t off = *offp;
2114 int error;
2115 size_t padding;
2116
2117 KASSERT(rw_write_held(&wl->wl_rwlock));
2118
2119 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
2120 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
2121
2122 bp = LIST_FIRST(&wl->wl_bufs);
2123
2124 while (bp) {
2125 int cnt;
2126 struct buf *obp = bp;
2127
2128 KASSERT(bp->b_flags & B_LOCKED);
2129
2130 wc->wc_type = WAPBL_WC_BLOCKS;
2131 wc->wc_len = blocklen;
2132 wc->wc_blkcount = 0;
2133 while (bp && (wc->wc_blkcount < bph)) {
2134 /*
2135 * Make sure all the physical block numbers are up to
2136 * date. If this is not always true on a given
2137 * filesystem, then VOP_BMAP must be called. We
2138 * could call VOP_BMAP here, or else in the filesystem
2139 * specific flush callback, although neither of those
2140 * solutions allow us to take the vnode lock. If a
2141 * filesystem requires that we must take the vnode lock
2142 * to call VOP_BMAP, then we can probably do it in
2143 * bwrite when the vnode lock should already be held
2144 * by the invoking code.
2145 */
2146 KASSERT((bp->b_vp->v_type == VBLK) ||
2147 (bp->b_blkno != bp->b_lblkno));
2148 KASSERT(bp->b_blkno > 0);
2149
2150 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
2151 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
2152 wc->wc_len += bp->b_bcount;
2153 wc->wc_blkcount++;
2154 bp = LIST_NEXT(bp, b_wapbllist);
2155 }
2156 if (wc->wc_len % blocklen != 0) {
2157 padding = blocklen - wc->wc_len % blocklen;
2158 wc->wc_len += padding;
2159 } else {
2160 padding = 0;
2161 }
2162
2163 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2164 ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
2165 wc->wc_len, padding, (intmax_t)off));
2166
2167 error = wapbl_circ_write(wl, wc, blocklen, &off);
2168 if (error)
2169 return error;
2170 bp = obp;
2171 cnt = 0;
2172 while (bp && (cnt++ < bph)) {
2173 error = wapbl_circ_write(wl, bp->b_data,
2174 bp->b_bcount, &off);
2175 if (error)
2176 return error;
2177 bp = LIST_NEXT(bp, b_wapbllist);
2178 }
2179 if (padding) {
2180 void *zero;
2181
2182 zero = wapbl_alloc(padding);
2183 memset(zero, 0, padding);
2184 error = wapbl_circ_write(wl, zero, padding, &off);
2185 wapbl_free(zero, padding);
2186 if (error)
2187 return error;
2188 }
2189 }
2190 *offp = off;
2191 return 0;
2192 }
2193
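/*
 * wapbl_write_revocations: write the pending deallocations recorded by
 * wapbl_register_deallocation as WAPBL_WC_REVOCATIONS records.  These
 * reuse the blocklist layout but carry only daddr/len pairs; no block
 * data follows the header.
 */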
2194 static int
2195 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
2196 {
2197 struct wapbl_wc_blocklist *wc =
2198 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2199 int i;
2200 int blocklen = 1<<wl->wl_log_dev_bshift;
2201 int bph;
2202 off_t off = *offp;
2203 int error;
2204
2205 if (wl->wl_dealloccnt == 0)
2206 return 0;
2207
2208 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
2209 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
2210
2211 i = 0;
2212 while (i < wl->wl_dealloccnt) {
2213 wc->wc_type = WAPBL_WC_REVOCATIONS;
2214 wc->wc_len = blocklen;
2215 wc->wc_blkcount = 0;
2216 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
2217 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2218 wl->wl_deallocblks[i];
2219 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2220 wl->wl_dealloclens[i];
2221 wc->wc_blkcount++;
2222 i++;
2223 }
2224 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2225 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2226 wc->wc_len, (intmax_t)off));
2227 error = wapbl_circ_write(wl, wc, blocklen, &off);
2228 if (error)
2229 return error;
2230 }
2231 *offp = off;
2232 return 0;
2233 }
2234
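/*
 * wapbl_write_inodes: write the registered inode table as one or more
 * WAPBL_WC_INODES records.  The first record in the sequence has
 * wc_clear set so that replay discards any previously accumulated
 * inode list before applying this one.
 */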
2235 static int
2236 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2237 {
2238 struct wapbl_wc_inodelist *wc =
2239 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2240 int i;
2241 int blocklen = 1 << wl->wl_log_dev_bshift;
2242 off_t off = *offp;
2243 int error;
2244
2245 struct wapbl_ino_head *wih;
2246 struct wapbl_ino *wi;
2247 int iph;
2248
2249 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2250 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2251
2252 i = 0;
2253 wih = &wl->wl_inohash[0];
2254 wi = 0;
2255 do {
2256 wc->wc_type = WAPBL_WC_INODES;
2257 wc->wc_len = blocklen;
2258 wc->wc_inocnt = 0;
2259 wc->wc_clear = (i == 0);
2260 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2261 while (!wi) {
2262 KASSERT((wih - &wl->wl_inohash[0])
2263 <= wl->wl_inohashmask);
2264 wi = LIST_FIRST(wih++);
2265 }
2266 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2267 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2268 wc->wc_inocnt++;
2269 i++;
2270 wi = LIST_NEXT(wi, wi_hash);
2271 }
2272 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2273 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2274 wc->wc_len, (intmax_t)off));
2275 error = wapbl_circ_write(wl, wc, blocklen, &off);
2276 if (error)
2277 return error;
2278 } while (i < wl->wl_inohashcnt);
2279
2280 *offp = off;
2281 return 0;
2282 }
2283
2284 #endif /* _KERNEL */
2285
2286 /****************************************************************/
2287
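/*
 * Replay-time block map: every file system block found in the log is
 * entered into wr_blkhash keyed on its disk address, remembering the
 * log offset of its copy.  Later log entries simply overwrite wb_off,
 * so the newest copy wins; revocation records remove entries again.
 */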
2288 struct wapbl_blk {
2289 LIST_ENTRY(wapbl_blk) wb_hash;
2290 daddr_t wb_blk;
2291 off_t wb_off; /* Offset of this block in the log */
2292 };
2293 #define WAPBL_BLKPOOL_MIN 83
2294
2295 static void
2296 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2297 {
2298 if (size < WAPBL_BLKPOOL_MIN)
2299 size = WAPBL_BLKPOOL_MIN;
2300 KASSERT(wr->wr_blkhash == 0);
2301 #ifdef _KERNEL
2302 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2303 #else /* ! _KERNEL */
2304 /* Manually implement hashinit */
2305 {
2306 unsigned long i, hashsize;
2307 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2308 continue;
2309 wr->wr_blkhash = wapbl_alloc(hashsize * sizeof(*wr->wr_blkhash));
2310 for (i = 0; i < hashsize; i++)
2311 LIST_INIT(&wr->wr_blkhash[i]);
2312 wr->wr_blkhashmask = hashsize - 1;
2313 }
2314 #endif /* ! _KERNEL */
2315 }
2316
2317 static void
2318 wapbl_blkhash_free(struct wapbl_replay *wr)
2319 {
2320 KASSERT(wr->wr_blkhashcnt == 0);
2321 #ifdef _KERNEL
2322 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2323 #else /* ! _KERNEL */
2324 wapbl_free(wr->wr_blkhash,
2325 (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
2326 #endif /* ! _KERNEL */
2327 }
2328
2329 static struct wapbl_blk *
2330 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2331 {
2332 struct wapbl_blk_head *wbh;
2333 struct wapbl_blk *wb;
2334 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2335 LIST_FOREACH(wb, wbh, wb_hash) {
2336 if (blk == wb->wb_blk)
2337 return wb;
2338 }
2339 return 0;
2340 }
2341
2342 static void
2343 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2344 {
2345 struct wapbl_blk_head *wbh;
2346 struct wapbl_blk *wb;
2347 wb = wapbl_blkhash_get(wr, blk);
2348 if (wb) {
2349 KASSERT(wb->wb_blk == blk);
2350 wb->wb_off = off;
2351 } else {
2352 wb = wapbl_alloc(sizeof(*wb));
2353 wb->wb_blk = blk;
2354 wb->wb_off = off;
2355 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2356 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2357 wr->wr_blkhashcnt++;
2358 }
2359 }
2360
2361 static void
2362 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2363 {
2364 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2365 if (wb) {
2366 KASSERT(wr->wr_blkhashcnt > 0);
2367 wr->wr_blkhashcnt--;
2368 LIST_REMOVE(wb, wb_hash);
2369 wapbl_free(wb, sizeof(*wb));
2370 }
2371 }
2372
2373 static void
2374 wapbl_blkhash_clear(struct wapbl_replay *wr)
2375 {
2376 unsigned long i;
2377 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2378 struct wapbl_blk *wb;
2379
2380 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2381 KASSERT(wr->wr_blkhashcnt > 0);
2382 wr->wr_blkhashcnt--;
2383 LIST_REMOVE(wb, wb_hash);
2384 wapbl_free(wb, sizeof(*wb));
2385 }
2386 }
2387 KASSERT(wr->wr_blkhashcnt == 0);
2388 }
2389
2390 /****************************************************************/
2391
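/*
 * Circular log offsets during replay lie in the half-open range
 * [wr_circ_off, wr_circ_off + wr_circ_size).  A read that would run
 * past the end of the range is split into two device reads, the second
 * starting again at wr_circ_off; wapbl_circ_advance applies the same
 * wrap-around without touching the device.
 */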
2392 static int
2393 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2394 {
2395 size_t slen;
2396 off_t off = *offp;
2397 int error;
2398 daddr_t pbn;
2399
2400 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2401 wr->wr_log_dev_bshift) == len);
2402
2403 if (off < wr->wr_circ_off)
2404 off = wr->wr_circ_off;
2405 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2406 if (slen < len) {
2407 pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2408 #ifdef _KERNEL
2409 pbn = btodb(pbn << wr->wr_log_dev_bshift);
2410 #endif
2411 error = wapbl_read(data, slen, wr->wr_devvp, pbn);
2412 if (error)
2413 return error;
2414 data = (uint8_t *)data + slen;
2415 len -= slen;
2416 off = wr->wr_circ_off;
2417 }
2418 pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2419 #ifdef _KERNEL
2420 pbn = btodb(pbn << wr->wr_log_dev_bshift);
2421 #endif
2422 error = wapbl_read(data, len, wr->wr_devvp, pbn);
2423 if (error)
2424 return error;
2425 off += len;
2426 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2427 off = wr->wr_circ_off;
2428 *offp = off;
2429 return 0;
2430 }
2431
2432 static void
2433 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2434 {
2435 size_t slen;
2436 off_t off = *offp;
2437
2438 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2439 wr->wr_log_dev_bshift) == len);
2440
2441 if (off < wr->wr_circ_off)
2442 off = wr->wr_circ_off;
2443 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2444 if (slen < len) {
2445 len -= slen;
2446 off = wr->wr_circ_off;
2447 }
2448 off += len;
2449 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2450 off = wr->wr_circ_off;
2451 *offp = off;
2452 }
2453
2454 /****************************************************************/
2455
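/*
 * wapbl_replay_start: open a log for replay.  Read the two commit
 * header candidates from the start of the log, pick the one with the
 * larger generation number, and scan the circular area from its tail
 * to its head, building the block hash and the registered inode list.
 */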
2456 int
2457 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2458 daddr_t off, size_t count, size_t blksize)
2459 {
2460 struct wapbl_replay *wr;
2461 int error;
2462 struct vnode *devvp;
2463 daddr_t logpbn;
2464 uint8_t *scratch;
2465 struct wapbl_wc_header *wch;
2466 struct wapbl_wc_header *wch2;
2467 /* Use this until we read the actual log header */
2468 int log_dev_bshift = ilog2(blksize);
2469 size_t used;
2470 daddr_t pbn;
2471
2472 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2473 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2474 vp, off, count, blksize));
2475
2476 if (off < 0)
2477 return EINVAL;
2478
2479 if (blksize < DEV_BSIZE)
2480 return EINVAL;
2481 if (blksize % DEV_BSIZE)
2482 return EINVAL;
2483
2484 #ifdef _KERNEL
2485 #if 0
2486 /* XXX vp->v_size isn't reliably set for VBLK devices,
2487 * especially root. However, we might still want to verify
2488 * that the full load is readable */
2489 if ((off + count) * blksize > vp->v_size)
2490 return EINVAL;
2491 #endif
2492 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2493 return error;
2494 }
2495 #else /* ! _KERNEL */
2496 devvp = vp;
2497 logpbn = off;
2498 #endif /* ! _KERNEL */
2499
2500 scratch = wapbl_alloc(MAXBSIZE);
2501
2502 pbn = logpbn;
2503 #ifdef _KERNEL
2504 pbn = btodb(pbn << log_dev_bshift);
2505 #endif
2506 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, pbn);
2507 if (error)
2508 goto errout;
2509
2510 wch = (struct wapbl_wc_header *)scratch;
2511 wch2 =
2512 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2513 /* XXX verify checksums and magic numbers */
2514 if (wch->wc_type != WAPBL_WC_HEADER) {
2515 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2516 error = EFTYPE;
2517 goto errout;
2518 }
2519
2520 if (wch2->wc_generation > wch->wc_generation)
2521 wch = wch2;
2522
2523 wr = wapbl_calloc(1, sizeof(*wr));
2524
2525 wr->wr_logvp = vp;
2526 wr->wr_devvp = devvp;
2527 wr->wr_logpbn = logpbn;
2528
2529 wr->wr_scratch = scratch;
2530
2531 wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
2532 wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
2533 wr->wr_circ_off = wch->wc_circ_off;
2534 wr->wr_circ_size = wch->wc_circ_size;
2535 wr->wr_generation = wch->wc_generation;
2536
2537 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2538
2539 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2540 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2541 " len=%"PRId64" used=%zu\n",
2542 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2543 wch->wc_circ_size, used));
2544
2545 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2546
2547 error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
2548 if (error) {
2549 wapbl_replay_stop(wr);
2550 wapbl_replay_free(wr);
2551 return error;
2552 }
2553
2554 *wrp = wr;
2555 return 0;
2556
2557 errout:
2558 wapbl_free(scratch, MAXBSIZE);
2559 return error;
2560 }
2561
2562 void
2563 wapbl_replay_stop(struct wapbl_replay *wr)
2564 {
2565
2566 if (!wapbl_replay_isopen(wr))
2567 return;
2568
2569 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2570
2571 wapbl_free(wr->wr_scratch, MAXBSIZE);
2572 wr->wr_scratch = NULL;
2573
2574 wr->wr_logvp = NULL;
2575
2576 wapbl_blkhash_clear(wr);
2577 wapbl_blkhash_free(wr);
2578 }
2579
2580 void
2581 wapbl_replay_free(struct wapbl_replay *wr)
2582 {
2583
2584 KDASSERT(!wapbl_replay_isopen(wr));
2585
2586 if (wr->wr_inodes)
2587 wapbl_free(wr->wr_inodes,
2588 wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
2589 wapbl_free(wr, sizeof(*wr));
2590 }
2591
2592 #ifdef _KERNEL
2593 int
2594 wapbl_replay_isopen1(struct wapbl_replay *wr)
2595 {
2596
2597 return wapbl_replay_isopen(wr);
2598 }
2599 #endif
2600
2601 /*
2602  * calculate the disk address for the i'th block in the wc_blocklist
2603 * offset by j blocks of size blen.
2604 *
2605 * wc_daddr is always a kernel disk address in DEV_BSIZE units that
2606 * was written to the journal.
2607 *
2608 * The kernel needs that address plus the offset in DEV_BSIZE units.
2609 *
2610 * Userland needs that address plus the offset in blen units.
2611 *
2612 */
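/*
 * For example (assuming the usual DEV_BSIZE of 512): with blen = 2048,
 * the kernel computes wc_daddr + btodb(j * 2048) = wc_daddr + 4 * j,
 * while userland computes dbtob(wc_daddr) / 2048 + j, i.e. the same
 * block expressed in 2048-byte units.
 */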
2613 static daddr_t
2614 wapbl_block_daddr(struct wapbl_wc_blocklist *wc, int i, int j, int blen)
2615 {
2616 daddr_t pbn;
2617
2618 #ifdef _KERNEL
2619 pbn = wc->wc_blocks[i].wc_daddr + btodb(j * blen);
2620 #else
2621 pbn = dbtob(wc->wc_blocks[i].wc_daddr) / blen + j;
2622 #endif
2623
2624 return pbn;
2625 }
2626
2627 static void
2628 wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
2629 {
2630 struct wapbl_wc_blocklist *wc =
2631 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2632 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2633 int i, j, n;
2634
2635 for (i = 0; i < wc->wc_blkcount; i++) {
2636 /*
2637 * Enter each physical block into the hashtable independently.
2638 */
2639 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2640 for (j = 0; j < n; j++) {
2641 wapbl_blkhash_ins(wr, wapbl_block_daddr(wc, i, j, fsblklen),
2642 *offp);
2643 wapbl_circ_advance(wr, fsblklen, offp);
2644 }
2645 }
2646 }
2647
2648 static void
2649 wapbl_replay_process_revocations(struct wapbl_replay *wr)
2650 {
2651 struct wapbl_wc_blocklist *wc =
2652 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2653 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2654 int i, j, n;
2655
2656 for (i = 0; i < wc->wc_blkcount; i++) {
2657 /*
2658 * Remove any blocks found from the hashtable.
2659 */
2660 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2661 for (j = 0; j < n; j++)
2662 wapbl_blkhash_rem(wr, wapbl_block_daddr(wc, i, j, fsblklen));
2663 }
2664 }
2665
2666 static void
2667 wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
2668 {
2669 struct wapbl_wc_inodelist *wc =
2670 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2671 void *new_inodes;
2672 const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
2673
2674 KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
2675
2676 	/*
2677 	 * Keep track of where we found this inode list so that the log
2678 	 * space holding it won't be overwritten.
2679 	 */
2680 if (wc->wc_clear) {
2681 wr->wr_inodestail = oldoff;
2682 wr->wr_inodescnt = 0;
2683 if (wr->wr_inodes != NULL) {
2684 wapbl_free(wr->wr_inodes, oldsize);
2685 wr->wr_inodes = NULL;
2686 }
2687 }
2688 wr->wr_inodeshead = newoff;
2689 if (wc->wc_inocnt == 0)
2690 return;
2691
2692 new_inodes = wapbl_alloc((wr->wr_inodescnt + wc->wc_inocnt) *
2693 sizeof(wr->wr_inodes[0]));
2694 if (wr->wr_inodes != NULL) {
2695 memcpy(new_inodes, wr->wr_inodes, oldsize);
2696 wapbl_free(wr->wr_inodes, oldsize);
2697 }
2698 wr->wr_inodes = new_inodes;
2699 memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
2700 wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
2701 wr->wr_inodescnt += wc->wc_inocnt;
2702 }
2703
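/*
 * wapbl_replay_process: scan the log from tail to head, dispatching on
 * the record type of each header block.  After each record the scan
 * position is checked against the record's own wc_len; a mismatch
 * means the log is corrupt and replay is abandoned.
 */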
2704 static int
2705 wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
2706 {
2707 off_t off;
2708 int error;
2709
2710 int logblklen = 1 << wr->wr_log_dev_bshift;
2711
2712 wapbl_blkhash_clear(wr);
2713
2714 off = tail;
2715 while (off != head) {
2716 struct wapbl_wc_null *wcn;
2717 off_t saveoff = off;
2718 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2719 if (error)
2720 goto errout;
2721 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2722 switch (wcn->wc_type) {
2723 case WAPBL_WC_BLOCKS:
2724 wapbl_replay_process_blocks(wr, &off);
2725 break;
2726
2727 case WAPBL_WC_REVOCATIONS:
2728 wapbl_replay_process_revocations(wr);
2729 break;
2730
2731 case WAPBL_WC_INODES:
2732 wapbl_replay_process_inodes(wr, saveoff, off);
2733 break;
2734
2735 default:
2736 printf("Unrecognized wapbl type: 0x%08x\n",
2737 wcn->wc_type);
2738 error = EFTYPE;
2739 goto errout;
2740 }
2741 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2742 if (off != saveoff) {
2743 printf("wapbl_replay: corrupted records\n");
2744 error = EFTYPE;
2745 goto errout;
2746 }
2747 }
2748 return 0;
2749
2750 errout:
2751 wapbl_blkhash_clear(wr);
2752 return error;
2753 }
2754
2755 #if 0
2756 int
2757 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2758 {
2759 off_t off;
2760 int mismatchcnt = 0;
2761 int logblklen = 1 << wr->wr_log_dev_bshift;
2762 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2763 void *scratch1 = wapbl_alloc(MAXBSIZE);
2764 void *scratch2 = wapbl_alloc(MAXBSIZE);
2765 int error = 0;
2766
2767 KDASSERT(wapbl_replay_isopen(wr));
2768
2769 off = wch->wc_tail;
2770 while (off != wch->wc_head) {
2771 struct wapbl_wc_null *wcn;
2772 #ifdef DEBUG
2773 off_t saveoff = off;
2774 #endif
2775 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2776 if (error)
2777 goto out;
2778 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2779 switch (wcn->wc_type) {
2780 case WAPBL_WC_BLOCKS:
2781 {
2782 struct wapbl_wc_blocklist *wc =
2783 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2784 int i;
2785 for (i = 0; i < wc->wc_blkcount; i++) {
2786 int foundcnt = 0;
2787 int dirtycnt = 0;
2788 int j, n;
2789 /*
2790 				 * Check each physical block against the
2791 				 * hashtable independently.
2792 */
2793 n = wc->wc_blocks[i].wc_dlen >>
2794 wch->wc_fs_dev_bshift;
2795 for (j = 0; j < n; j++) {
2796 struct wapbl_blk *wb =
2797 wapbl_blkhash_get(wr,
2798 wapbl_block_daddr(wc, i, j, fsblklen));
2799 if (wb && (wb->wb_off == off)) {
2800 foundcnt++;
2801 error =
2802 wapbl_circ_read(wr,
2803 scratch1, fsblklen,
2804 &off);
2805 if (error)
2806 goto out;
2807 error =
2808 wapbl_read(scratch2,
2809 fsblklen, fsdevvp,
2810 wb->wb_blk);
2811 if (error)
2812 goto out;
2813 if (memcmp(scratch1,
2814 scratch2,
2815 fsblklen)) {
2816 printf(
2817 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2818 wb->wb_blk, (intmax_t)off);
2819 dirtycnt++;
2820 mismatchcnt++;
2821 }
2822 } else {
2823 wapbl_circ_advance(wr,
2824 fsblklen, &off);
2825 }
2826 }
2827 #if 0
2828 /*
2829 * If all of the blocks in an entry
2830 * are clean, then remove all of its
2831 * blocks from the hashtable since they
2832 * never will need replay.
2833 */
2834 if ((foundcnt != 0) &&
2835 (dirtycnt == 0)) {
2836 off = saveoff;
2837 wapbl_circ_advance(wr,
2838 logblklen, &off);
2839 for (j = 0; j < n; j++) {
2840 struct wapbl_blk *wb =
2841 wapbl_blkhash_get(wr,
2842 wapbl_block_daddr(wc, i, j, fsblklen));
2843 if (wb &&
2844 (wb->wb_off == off)) {
2845 wapbl_blkhash_rem(wr, wb->wb_blk);
2846 }
2847 wapbl_circ_advance(wr,
2848 fsblklen, &off);
2849 }
2850 }
2851 #endif
2852 }
2853 }
2854 break;
2855 case WAPBL_WC_REVOCATIONS:
2856 case WAPBL_WC_INODES:
2857 break;
2858 default:
2859 KASSERT(0);
2860 }
2861 #ifdef DEBUG
2862 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2863 KASSERT(off == saveoff);
2864 #endif
2865 }
2866 out:
2867 wapbl_free(scratch1, MAXBSIZE);
2868 wapbl_free(scratch2, MAXBSIZE);
2869 if (!error && mismatchcnt)
2870 error = EFTYPE;
2871 return error;
2872 }
2873 #endif
2874
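/*
 * wapbl_replay_write: walk the block hash and copy each journalled
 * block from its saved log offset to its home location on the file
 * system device, completing the replay.
 */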
2875 int
2876 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2877 {
2878 struct wapbl_blk *wb;
2879 size_t i;
2880 off_t off;
2881 void *scratch;
2882 int error = 0;
2883 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2884
2885 KDASSERT(wapbl_replay_isopen(wr));
2886
2887 scratch = wapbl_alloc(MAXBSIZE);
2888
2889 for (i = 0; i <= wr->wr_blkhashmask; ++i) {
2890 LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
2891 off = wb->wb_off;
2892 error = wapbl_circ_read(wr, scratch, fsblklen, &off);
2893 if (error)
2894 break;
2895 error = wapbl_write(scratch, fsblklen, fsdevvp,
2896 wb->wb_blk);
2897 if (error)
2898 break;
2899 }
2900 }
2901
2902 wapbl_free(scratch, MAXBSIZE);
2903 return error;
2904 }
2905
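/*
 * wapbl_replay_can_read/wapbl_replay_read: while a replayed log is
 * still open, let the file system fetch the journalled (newer) copy of
 * a block directly from the log instead of a possibly stale copy from
 * disk.
 */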
2906 int
2907 wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
2908 {
2909 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2910
2911 KDASSERT(wapbl_replay_isopen(wr));
2912 KASSERT((len % fsblklen) == 0);
2913
2914 while (len != 0) {
2915 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2916 if (wb)
2917 return 1;
2918 		len -= fsblklen;
		blk++;
2919 }
2920 return 0;
2921 }
2922
2923 int
2924 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2925 {
2926 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2927
2928 KDASSERT(wapbl_replay_isopen(wr));
2929
2930 KASSERT((len % fsblklen) == 0);
2931
2932 while (len != 0) {
2933 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2934 if (wb) {
2935 off_t off = wb->wb_off;
2936 int error;
2937 error = wapbl_circ_read(wr, data, fsblklen, &off);
2938 if (error)
2939 return error;
2940 }
2941 data = (uint8_t *)data + fsblklen;
2942 len -= fsblklen;
2943 blk++;
2944 }
2945 return 0;
2946 }
2947
2948 #ifdef _KERNEL
2949
2950 MODULE(MODULE_CLASS_VFS, wapbl, NULL);
2951
2952 static int
2953 wapbl_modcmd(modcmd_t cmd, void *arg)
2954 {
2955
2956 switch (cmd) {
2957 case MODULE_CMD_INIT:
2958 wapbl_init();
2959 return 0;
2960 case MODULE_CMD_FINI:
2961 return wapbl_fini(true);
2962 default:
2963 return ENOTTY;
2964 }
2965 }
2966 #endif /* _KERNEL */
2967