/*	$NetBSD: vfs_wapbl.c,v 1.91 2017/03/17 03:17:07 riastradh Exp $	*/

/*-
 * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This implements file system independent write-ahead logging (WAPBL).
 */

#define WAPBL_INTERNAL

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.91 2017/03/17 03:17:07 riastradh Exp $");

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/time.h>
#include <sys/wapbl.h>
#include <sys/wapbl_replay.h>

#ifdef _KERNEL

#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/evcnt.h>
#include <sys/file.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <miscfs/specfs/specdev.h>

#define	wapbl_alloc(s)		kmem_alloc((s), KM_SLEEP)
#define	wapbl_free(a, s)	kmem_free((a), (s))
#define	wapbl_calloc(n, s)	kmem_zalloc((n)*(s), KM_SLEEP)

static struct sysctllog *wapbl_sysctl;
static int wapbl_flush_disk_cache = 1;
static int wapbl_verbose_commit = 0;

static inline size_t wapbl_space_free(size_t, off_t, off_t);

#else /* !_KERNEL */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	KDASSERT(x)		assert(x)
#define	KASSERT(x)		assert(x)
#define	wapbl_alloc(s)		malloc(s)
#define	wapbl_free(a, s)	free(a)
#define	wapbl_calloc(n, s)	calloc((n), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		lm = rwlock held writing or mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
LIST_HEAD(wapbl_ino_head, wapbl_ino);
struct wapbl {
	struct vnode *wl_logvp;	/* r:	log here */
	struct vnode *wl_devvp;	/* r:	log on this device */
	struct mount *wl_mount;	/* r:	mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r:	Physical block number of start of log */
	int wl_log_dev_bshift;	/* r:	logarithm of device block size of log
					device */
	int wl_fs_dev_bshift;	/* r:	logarithm of device block size of
					filesystem device */

	unsigned wl_lock_count;	/* m:	Count of transactions in progress */

	size_t wl_circ_size;	/* r:	Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r:	Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r:	Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r:	Number of buf bytes reserved for log */

	off_t wl_head;		/* l:	Byte offset of log head */
	off_t wl_tail;		/* l:	Byte offset of log tail */
	/*
	 * WAPBL log layout, stored on wl_devvp at wl_logpbn:
	 *
	 *  ___________________ wl_circ_size __________________
	 * /                                                   \
	 * +---------+---------+-------+--------------+--------+
	 * [ commit0 | commit1 | CCWCW | EEEEEEEEEEEE | CCCWCW ]
	 * +---------+---------+-------+--------------+--------+
	 *       wl_circ_off --^       ^-- wl_head    ^-- wl_tail
	 *
	 * commit0 and commit1 are commit headers.  A commit header has
	 * a generation number, indicating which of the two headers is
	 * more recent, and an assignment of head and tail pointers.
	 * The rest is a circular queue of log records, starting at
	 * the byte offset wl_circ_off.
	 *
	 * E marks empty space for records.
	 * W marks records for block writes issued but waiting.
	 * C marks completed records.
	 *
	 * wapbl_flush writes new records to empty `E' spaces after
	 * wl_head from the current transaction in memory.
	 *
	 * wapbl_truncate advances wl_tail past any completed `C'
	 * records, freeing them up for use.
	 *
	 * head == tail == 0 means log is empty.
	 * head == tail != 0 means log is full.
	 *
	 * See assertions in wapbl_advance() for other boundary
	 * conditions.
	 *
	 * Only wapbl_flush moves the head, except when wapbl_truncate
	 * sets it to 0 to indicate that the log is empty.
	 *
	 * Only wapbl_truncate moves the tail, except when wapbl_flush
	 * sets it to wl_circ_off to indicate that the log is full.
	 */

	struct wapbl_wc_header *wl_wc_header;	/* l	*/
	void *wl_wc_scratch;	/* l:	scratch space (XXX: why?!?) */

	kmutex_t wl_mtx;	/* u:	short-term lock */
	krwlock_t wl_rwlock;	/* u:	File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

#if _KERNEL
	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
	wapbl_flush_fn_t wl_flush;	/* r	*/
	wapbl_flush_fn_t wl_flush_abort;/* r	*/

	/* Event counters */
	char wl_ev_group[EVCNT_STRING_MAX];	/* r	*/
	struct evcnt wl_ev_commit;		/* l	*/
	struct evcnt wl_ev_journalwrite;	/* l	*/
	struct evcnt wl_ev_metawrite;		/* lm	*/
	struct evcnt wl_ev_cacheflush;		/* l	*/
#endif

	size_t wl_bufbytes;	/* m:	Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m:	Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m:	Total bcount of wl_bufs */

	LIST_HEAD(, buf) wl_bufs;	/* m:	Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes;	/* m:	Amount of space available for
						reclamation by truncate */
	int wl_error_count;	/* m:	# of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
#endif

#if _KERNEL
	int wl_brperjblock;	/* r Block records per journal block */
#endif

	TAILQ_HEAD(, wapbl_dealloc) wl_dealloclist;	/* lm:	list head */
	int wl_dealloccnt;				/* lm:	total count */
	int wl_dealloclim;				/* r:	max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	struct wapbl_ino_head *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries;	/* On disk transaction
						   accounting */

	u_char *wl_buffer;	/* l:	buffer for wapbl_buffered_write() */
	daddr_t wl_buffer_dblk;	/* l:	buffer disk block address */
	size_t wl_buffer_used;	/* l:	buffer current use */
};
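
/*
 * Illustrative sketch of the head/tail encoding described above; not
 * compiled, and the numbers are made-up examples (wl_circ_off = 1024,
 * wl_circ_size = 8192).
 */
#if 0
static void
wapbl_headtail_example(void)
{
	off_t head, tail;

	head = tail = 0;		/* empty log */

	/*
	 * After 2048 bytes of records are flushed into an empty log,
	 * the tail snaps to wl_circ_off and the head lands past it,
	 * so the used region is [1024, 3072).
	 */
	head = 1024 + 2048;
	tail = 1024;

	/*
	 * head == tail != 0 would mean the log is completely full;
	 * wapbl_advance() asserts the remaining boundary conditions.
	 */
}
#endif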

#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
#endif /* _KERNEL */

static int wapbl_replay_process(struct wapbl_replay *wr, off_t, off_t);

static inline size_t wapbl_space_used(size_t avail, off_t head,
	off_t tail);

#ifdef _KERNEL

static struct pool wapbl_entry_pool;
static struct pool wapbl_dealloc_pool;

#define	WAPBL_INODETRK_SIZE 83
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

static void wapbl_deallocation_free(struct wapbl *, struct wapbl_dealloc *,
	bool);

static void wapbl_evcnt_init(struct wapbl *);
static void wapbl_evcnt_free(struct wapbl *);

#if 0
int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
#endif

static int wapbl_replay_isopen1(struct wapbl_replay *);

struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_can_read = wapbl_replay_can_read,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};

static int
wapbl_sysctl_init(void)
{
	int rv;
	const struct sysctlnode *rnode, *cnode;

	wapbl_sysctl = NULL;

	rv = sysctl_createv(&wapbl_sysctl, 0, NULL, &rnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "wapbl",
		       SYSCTL_DESCR("WAPBL journaling options"),
		       NULL, 0, NULL, 0,
		       CTL_VFS, CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "flush_disk_cache",
		       SYSCTL_DESCR("flush disk cache"),
		       NULL, 0, &wapbl_flush_disk_cache, 0,
		       CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "verbose_commit",
		       SYSCTL_DESCR("show time and size of wapbl log commits"),
		       NULL, 0, &wapbl_verbose_commit, 0,
		       CTL_CREATE, CTL_EOL);
	return rv;
}

static void
wapbl_init(void)
{

	pool_init(&wapbl_entry_pool, sizeof(struct wapbl_entry), 0, 0, 0,
	    "wapblentrypl", &pool_allocator_kmem, IPL_VM);
	pool_init(&wapbl_dealloc_pool, sizeof(struct wapbl_dealloc), 0, 0, 0,
	    "wapbldealloc", &pool_allocator_nointr, IPL_NONE);

	wapbl_sysctl_init();
}

static int
wapbl_fini(void)
{

	if (wapbl_sysctl != NULL)
		sysctl_teardown(&wapbl_sysctl);

	pool_destroy(&wapbl_dealloc_pool);
	pool_destroy(&wapbl_entry_pool);

	return 0;
}

static void
wapbl_evcnt_init(struct wapbl *wl)
{
	snprintf(wl->wl_ev_group, sizeof(wl->wl_ev_group),
	    "wapbl fsid 0x%x/0x%x",
	    wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[0],
	    wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[1]
	);

	evcnt_attach_dynamic(&wl->wl_ev_commit, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "commit");
	evcnt_attach_dynamic(&wl->wl_ev_journalwrite, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "journal sync block write");
	evcnt_attach_dynamic(&wl->wl_ev_metawrite, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "metadata finished block write");
	evcnt_attach_dynamic(&wl->wl_ev_cacheflush, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "cache flush");
}

static void
wapbl_evcnt_free(struct wapbl *wl)
{
	evcnt_detach(&wl->wl_ev_commit);
	evcnt_detach(&wl->wl_ev_journalwrite);
	evcnt_detach(&wl->wl_ev_metawrite);
	evcnt_detach(&wl->wl_ev_cacheflush);
}

static int
wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
{
	int error, i;

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));

	/*
	 * It's only valid to reuse the replay log if it's
	 * the same as the new log we just opened.
	 */
	KDASSERT(!wapbl_replay_isopen(wr));
	KASSERT(wl->wl_devvp->v_type == VBLK);
	KASSERT(wr->wr_devvp->v_type == VBLK);
	KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
	KASSERT(wl->wl_logpbn == wr->wr_logpbn);
	KASSERT(wl->wl_circ_size == wr->wr_circ_size);
	KASSERT(wl->wl_circ_off == wr->wr_circ_off);
	KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
	KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);

	wl->wl_wc_header->wc_generation = wr->wr_generation + 1;

	for (i = 0; i < wr->wr_inodescnt; i++)
		wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
		    wr->wr_inodes[i].wr_imode);

	/* Make sure new transaction won't overwrite old inodes list */
	KDASSERT(wapbl_transaction_len(wl) <=
	    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
	    wr->wr_inodestail));

	wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
	wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
	    wapbl_transaction_len(wl);

	error = wapbl_write_inodes(wl, &wl->wl_head);
	if (error)
		return error;

	KASSERT(wl->wl_head != wl->wl_tail);
	KASSERT(wl->wl_head != 0);

	return 0;
}

int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = ilog2(blksize);
	int fs_dev_bshift = log_dev_bshift;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
			("wapbl: log device's block size cannot be larger "
			 "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return ENOSPC;
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	LIST_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of log_dev_bshift */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;

	/*
	 * wl_bufbytes_max limits the size of the in memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
	 *   it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
	 * Therefore it must be a multiple of the least common multiple of
	 * those three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
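
	/*
	 * Worked example of the rounding above (illustrative numbers
	 * only): with PAGE_SHIFT = 12, wl_log_dev_bshift = 9 and
	 * wl_fs_dev_bshift = 9, the constraints are 4096, 512 and 512,
	 * and a candidate wl_bufbytes_max of 1234567 bytes rounds down:
	 *
	 *	(1234567 >> 12) << 12 == 1232896 == 301 * 4096
	 *
	 * The two 9-bit shift pairs are then no-ops, since any multiple
	 * of 4096 is already a multiple of 512.
	 */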

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (nbuf / 2) * 1024;

	wl->wl_brperjblock = ((1<<wl->wl_log_dev_bshift)
	    - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
	KASSERT(wl->wl_brperjblock > 0);

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = wl->wl_bufbytes_max / mp->mnt_stat.f_bsize / 2;
	TAILQ_INIT(&wl->wl_dealloclist);

	wl->wl_buffer = wapbl_alloc(MAXPHYS);
	wl->wl_buffer_used = 0;

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	wapbl_evcnt_init(wl);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1 << wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_alloc(len);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		error = wapbl_start_flush_inodes(wl, wr);
		if (error)
			goto errout;
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
 errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_buffer, MAXPHYS);
	wapbl_inodetrk_free(wl);
	wapbl_free(wl, sizeof(*wl));

	return error;
}

/*
 * Like wapbl_flush, only discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct wapbl_dealloc *wd;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));

#ifdef WAPBL_DEBUG_PRINT
	{
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			pool_put(&wapbl_entry_pool, we);
		}
	}

	/* Discard list of deallocs */
	while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL)
		wapbl_deallocation_free(wl, wd, true);

	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
	KASSERT(wl->wl_dealloccnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return EBUSY;
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
	KASSERT(wl->wl_dealloccnt == 0);

	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_buffer, MAXPHYS);
	wapbl_inodetrk_free(wl);

	wapbl_evcnt_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl, sizeof(*wl));

	return 0;
}

/****************************************************************/
/*
 * Unbuffered disk I/O
 */

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
	struct buf *bp;
	int error;

	KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
	KASSERT(devvp->v_type == VBLK);

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY;	/* silly & dubious */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;
	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%"PRIx64"\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%"PRIx64" failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

/*
 * wapbl_write(data, len, devvp, pbn)
 *
 *	Synchronously write len bytes from data to physical block pbn
 *	on devvp.
 */
int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

/*
 * wapbl_read(data, len, devvp, pbn)
 *
 *	Synchronously read len bytes into data from physical block pbn
 *	on devvp.
 */
int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}

/****************************************************************/
/*
 * Buffered disk writes -- try to coalesce writes and emit
 * MAXPHYS-aligned blocks.
 */

/*
 * wapbl_buffered_flush(wl)
 *
 *	Flush any buffered writes from wapbl_buffered_write.
 */
static int
wapbl_buffered_flush(struct wapbl *wl)
{
	int error;

	if (wl->wl_buffer_used == 0)
		return 0;

	error = wapbl_doio(wl->wl_buffer, wl->wl_buffer_used,
	    wl->wl_devvp, wl->wl_buffer_dblk, B_WRITE);
	wl->wl_buffer_used = 0;

	wl->wl_ev_journalwrite.ev_count++;

	return error;
}

/*
 * wapbl_buffered_write(data, len, wl, pbn)
 *
 *	Write len bytes from data to physical block pbn on
 *	wl->wl_devvp.  The write may not complete until
 *	wapbl_buffered_flush.
 */
static int
wapbl_buffered_write(void *data, size_t len, struct wapbl *wl, daddr_t pbn)
{
	int error;
	size_t resid;

	/*
	 * If not adjacent to buffered data flush first.  Disk block
	 * address is always valid for non-empty buffer.
	 */
	if (wl->wl_buffer_used > 0 &&
	    pbn != wl->wl_buffer_dblk + btodb(wl->wl_buffer_used)) {
		error = wapbl_buffered_flush(wl);
		if (error)
			return error;
	}
	/*
	 * If this write goes to an empty buffer we have to
	 * save the disk block address first.
	 */
	if (wl->wl_buffer_used == 0)
		wl->wl_buffer_dblk = pbn;
	/*
	 * Remaining space so this buffer ends on a MAXPHYS boundary.
	 *
	 * Cannot become less than or equal to zero, as the buffer
	 * would then have been flushed on the previous call.
	 */
	resid = MAXPHYS - dbtob(wl->wl_buffer_dblk % btodb(MAXPHYS)) -
	    wl->wl_buffer_used;
	KASSERT(resid > 0);
	KASSERT(dbtob(btodb(resid)) == resid);
	if (len >= resid) {
		memcpy(wl->wl_buffer + wl->wl_buffer_used, data, resid);
		wl->wl_buffer_used += resid;
		error = wapbl_doio(wl->wl_buffer, wl->wl_buffer_used,
		    wl->wl_devvp, wl->wl_buffer_dblk, B_WRITE);
		data = (uint8_t *)data + resid;
		len -= resid;
		wl->wl_buffer_dblk = pbn + btodb(resid);
		wl->wl_buffer_used = 0;
		if (error)
			return error;
	}
	KASSERT(len < MAXPHYS);
	if (len > 0) {
		memcpy(wl->wl_buffer + wl->wl_buffer_used, data, len);
		wl->wl_buffer_used += len;
	}

	return 0;
}
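
/*
 * Illustrative sketch of the coalescing behavior above; not compiled,
 * and the numbers are made-up examples assuming DEV_BSIZE = 512.
 */
#if 0
static void
wapbl_buffered_write_example(struct wapbl *wl, void *a, void *b)
{
	/* A first write primes the empty buffer at disk block 100. */
	wapbl_buffered_write(a, 4096, wl, 100);
	/* now wl_buffer_dblk == 100 and wl_buffer_used == 4096 */

	/*
	 * Block 108 == 100 + btodb(4096), so this write is adjacent
	 * and is appended to the pending buffer instead of being
	 * issued to the device.
	 */
	wapbl_buffered_write(b, 4096, wl, 108);

	/* A non-adjacent block forces the pending 8192 bytes out. */
	wapbl_buffered_write(a, 4096, wl, 500);
}
#endif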

/*
 * wapbl_circ_write(wl, data, len, offp)
 *
 *	Write len bytes from data to the circular queue of wl, starting
 *	at linear byte offset *offp, and returning the new linear byte
 *	offset in *offp.
 *
 *	If the starting linear byte offset precedes wl->wl_circ_off,
 *	the write instead begins at wl->wl_circ_off.  XXX WTF?  This
 *	should be a KASSERT, not a conditional.
 *
 *	The write is buffered in wl and must be flushed with
 *	wapbl_buffered_flush before it will be submitted to the disk.
 */
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;
	daddr_t pbn;

	KDASSERT(((len >> wl->wl_log_dev_bshift) <<
	    wl->wl_log_dev_bshift) == len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
#ifdef _KERNEL
		pbn = btodb(pbn << wl->wl_log_dev_bshift);
#endif
		error = wapbl_buffered_write(data, slen, wl, pbn);
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
#ifdef _KERNEL
	pbn = btodb(pbn << wl->wl_log_dev_bshift);
#endif
	error = wapbl_buffered_write(data, len, wl, pbn);
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}
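
/*
 * Worked example of the wraparound above, with made-up numbers: if
 * wl_circ_off = 1024 and wl_circ_size = 8192, the queue occupies byte
 * offsets [1024, 9216).  A 2048-byte write starting at *offp == 8704
 * has only slen == 1024 + 8192 - 8704 == 512 bytes left before the
 * end, so 512 bytes are written at offset 8704 and the remaining 1536
 * continue at wl_circ_off, leaving *offp == 1024 + 1536 == 2560.
 */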

/****************************************************************/
/*
 * WAPBL transactions: entering, adding/removing bufs, and exiting
 */

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;

	KDASSERT(wl);

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
		   wl->wl_bufbytes_max / 2) ||
		  ((wl->wl_bufcount + (lockcount * 10)) >
		   wl->wl_bufcount_max / 2) ||
		  (wapbl_transaction_len(wl) > wl->wl_circ_size / 2) ||
		  (wl->wl_dealloccnt >= (wl->wl_dealloclim / 2));
	mutex_exit(&wl->wl_mtx);

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu) "
		    "dealloccnt %d (lim=%d)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max,
		    wl->wl_dealloccnt, wl->wl_dealloclim));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, RW_READER);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	/*
	 * XXX this could be handled more gracefully, perhaps place
	 * only a partial transaction in the log and allow the
	 * remaining to flush without the protection of the journal.
	 */
	KASSERTMSG((wapbl_transaction_len(wl) <=
		    (wl->wl_circ_size - wl->wl_reserved_bytes)),
	    "wapbl_end: current transaction too big to flush");

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}
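
/*
 * Illustrative sketch of the transaction bracketing protocol above;
 * not compiled.  The metadata update in the middle is a hypothetical
 * placeholder for whatever the file system does under the journal.
 */
#if 0
static int
wapbl_transaction_example(struct wapbl *wl, struct buf *bp)
{
	int error;

	/* May first flush the current transaction to make space. */
	error = wapbl_begin(wl, __FILE__, __LINE__);
	if (error)
		return error;

	/* ... modify metadata, then hand the buffer to the journal ... */
	wapbl_add_buf(wl, bp);

	/* Drops the reader lock; the buffer stays in the transaction. */
	wapbl_end(wl);
	return 0;
}
#endif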

void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		LIST_REMOVE(bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		   ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked by dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		   ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	   ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	LIST_REMOVE(bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */

/****************************************************************/
/* Some utility inlines */

/*
 * wapbl_space_used(avail, head, tail)
 *
 *	Number of bytes used in a circular queue of avail total bytes,
 *	from tail to head.
 */
static inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail == 0) {
		KASSERT(head == 0);
		return 0;
	}
	return ((head + (avail - 1) - tail) % avail) + 1;
}
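
/*
 * Worked example, with made-up numbers: for avail == 8192, a wrapped
 * region with head == 2560 and tail == 8704 uses
 *
 *	((2560 + 8191 - 8704) % 8192) + 1 == 2047 + 1 == 2048
 *
 * bytes, and a non-wrapped region with head == 3072 and tail == 1024
 * likewise uses ((3072 + 8191 - 1024) % 8192) + 1 == 2048 bytes.
 */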

#ifdef _KERNEL
/*
 * wapbl_advance(size, off, oldoff, delta)
 *
 *	Given a byte offset oldoff into a circular queue of size bytes
 *	starting at off, return a new byte offset oldoff + delta into
 *	the circular queue.
 */
static inline off_t
wapbl_advance(size_t size, size_t off, off_t oldoff, size_t delta)
{
	off_t newoff;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= (size_t)size);
	KASSERT((oldoff == 0) || ((size_t)oldoff >= off));
	KASSERT(oldoff < (off_t)(size + off));

	if ((oldoff == 0) && (delta != 0))
		newoff = off + delta;
	else if ((oldoff + delta) < (size + off))
		newoff = oldoff + delta;
	else
		newoff = (oldoff + delta) - size;

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (newoff == oldoff));
	KASSERT((delta == 0) || (newoff != 0));
	KASSERT((delta != (size)) || (newoff == oldoff));

	/* Define acceptable ranges for output. */
	KASSERT((newoff == 0) || ((size_t)newoff >= off));
	KASSERT((size_t)newoff < (size + off));
	return newoff;
}
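
/*
 * Worked example, with made-up numbers: in a queue with size == 8192
 * starting at off == 1024, advancing oldoff == 8704 by delta == 2048
 * crosses the end, since 8704 + 2048 >= 8192 + 1024, giving
 * newoff == 8704 + 2048 - 8192 == 2560.  Advancing from oldoff == 0
 * (the empty state) by a nonzero delta starts at off, giving
 * newoff == 1024 + delta.
 */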

/*
 * wapbl_space_free(avail, head, tail)
 *
 *	Number of bytes free in a circular queue of avail total bytes,
 *	in which everything from tail to head is used.
 */
static inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{

	return avail - wapbl_space_used(avail, head, tail);
}

/*
 * wapbl_advance_head(size, off, delta, headp, tailp)
 *
 *	In a circular queue of size bytes starting at off, given the
 *	old head and tail offsets *headp and *tailp, store the new head
 *	and tail offsets in *headp and *tailp resulting from adding
 *	delta bytes of data to the head.
 */
static inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_free(size, head, tail));
	head = wapbl_advance(size, off, head, delta);
	if ((tail == 0) && (head != 0))
		tail = off;
	*headp = head;
	*tailp = tail;
}

/*
 * wapbl_advance_tail(size, off, delta, headp, tailp)
 *
 *	In a circular queue of size bytes starting at off, given the
 *	old head and tail offsets *headp and *tailp, store the new head
 *	and tail offsets in *headp and *tailp resulting from removing
 *	delta bytes of data from the tail.
 */
static inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_used(size, head, tail));
	tail = wapbl_advance(size, off, tail, delta);
	if (head == tail) {
		head = tail = 0;
	}
	*headp = head;
	*tailp = tail;
}
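
/*
 * Illustrative sketch of a head/tail round trip through the helpers
 * above; not compiled, with made-up sizes.
 */
#if 0
static void
wapbl_advance_example(void)
{
	const size_t size = 8192, off = 1024;
	off_t head = 0, tail = 0;	/* empty log */

	/* Add 2048 bytes: the tail snaps to off, the head moves past it. */
	wapbl_advance_head(size, off, 2048, &head, &tail);
	KASSERT(head == 3072 && tail == 1024);

	/* Remove the same 2048 bytes: head == tail resets both to 0. */
	wapbl_advance_tail(size, off, 2048, &head, &tail);
	KASSERT(head == 0 && tail == 0);
}
#endif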


/****************************************************************/

/*
 * wapbl_truncate(wl, minfree)
 *
 *	Wait until at least minfree bytes are available in the log.
 *
 *	If it was necessary to wait for writes to complete,
 *	advance the circular queue tail to reflect the new write
 *	completions and issue a write commit to the log.
 *
 *	=> Caller must hold wl->wl_rwlock writer lock.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
		wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush.  This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}

/****************************************************************/

void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;
#ifdef WAPBL_DEBUG_BUFBYTES
	const int bufsize = bp->b_bufsize;
#endif

	/*
	 * Handle possible flushing of buffers after log has been
	 * decommissioned.
	 */
	if (!wl) {
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bufsize);
		we->we_unsynced_bufbytes -= bufsize;
#endif

		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			pool_put(&wapbl_entry_pool, we);
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_oflags & BO_DONE);
	KDASSERT(!(bp->b_oflags & BO_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_cflags & BC_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_cflags & BC_INVAL));
	KDASSERT(!(bp->b_cflags & BC_NOCACHE));
#endif

	if (bp->b_error) {
		/*
		 * If an error occurs, it would be nice to leave the buffer
		 * as a delayed write on the LRU queue so that we can retry
		 * it later.  But buffercache(9) can't handle dirty buffer
		 * reuse, so just mark the log permanently errored out.
		 */
		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
	}

	/*
	 * Release the buffer here. wapbl_flush() may wait for the
	 * log to become empty and we better unbusy the buffer before
	 * wapbl_flush() returns.
	 */
	brelse(bp, 0);

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bufsize);
	we->we_unsynced_bufbytes -= bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bufsize);
	wl->wl_unsynced_bufbytes -= bufsize;
#endif
	wl->wl_ev_metawrite.ev_count++;

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions.  If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		       (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			pool_put(&wapbl_entry_pool, we);
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
}

/*
 * wapbl_flush(wl, wait)
 *
 *	Flush pending block writes, deallocations, and inodes from
 *	the current transaction in memory to the log on disk:
 *
 *	1. Call the file system's wl_flush callback to flush any
 *	   per-file-system pending updates.
 *	2. Wait for enough space in the log for the current transaction.
 *	3. Synchronously write the new log records, advancing the
 *	   circular queue head.
 *	4. Issue the pending block writes asynchronously, now that they
 *	   are recorded in the log and can be replayed after crash.
 *	5. If wait is true, wait for all writes to complete and for the
 *	   log to become empty.
 *
 *	On failure, call the file system's wl_flush_abort callback.
 */
int
wapbl_flush(struct wapbl *wl, int waitfor)
{
	struct buf *bp;
	struct wapbl_entry *we;
	off_t off;
	off_t head;
	off_t tail;
	size_t delta = 0;
	size_t flushsize;
	size_t reserved;
	int error = 0;

	/*
	 * Do a quick check to see if a full flush can be skipped
	 * This assumes that the flush callback does not need to be called
	 * unless there are other outstanding bufs.
	 */
	if (!waitfor) {
		size_t nbufs;
		mutex_enter(&wl->wl_mtx);	/* XXX need mutex here to
						   protect the KASSERTS */
		nbufs = wl->wl_bufcount;
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
		mutex_exit(&wl->wl_mtx);
		if (nbufs == 0)
			return 0;
	}

	/*
	 * XXX we may consider using LK_UPGRADE here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));

	/*
	 * Now that we are exclusively locked and the file system has
	 * issued any deferred block writes for this transaction, check
	 * whether there are any blocks to write to the log.  If not,
	 * skip waiting for space or writing any log entries.
	 *
	 * XXX Shouldn't this also check wl_dealloccnt and
	 * wl_inohashcnt?  Perhaps wl_dealloccnt doesn't matter if the
	 * file system didn't produce any blocks as a consequence of
	 * it, but the same does not seem to be so of wl_inohashcnt.
	 */
	if (wl->wl_bufcount == 0) {
		goto wait_out;
	}

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d flushing entries with "
	     "bufcount=%zu bufbytes=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes));
#endif

	/* Calculate amount of space needed to flush */
	flushsize = wapbl_transaction_len(wl);
	if (wapbl_verbose_commit) {
		struct timespec ts;
		getnanotime(&ts);
		printf("%s: %lld.%09ld this transaction = %zu bytes\n",
		    __func__, (long long)ts.tv_sec,
		    (long)ts.tv_nsec, flushsize);
	}

	if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
		/*
		 * XXX this could be handled more gracefully, perhaps place
		 * only a partial transaction in the log and allow the
		 * remaining to flush without the protection of the journal.
		 */
		panic("wapbl_flush: current transaction too big to flush");
	}

	error = wapbl_truncate(wl, flushsize);
	if (error)
		goto out;

	off = wl->wl_head;
	KASSERT((off == 0) || (off >= wl->wl_circ_off));
	KASSERT((off == 0) || (off < wl->wl_circ_off + wl->wl_circ_size));
	error = wapbl_write_blocks(wl, &off);
	if (error)
		goto out;
	error = wapbl_write_revocations(wl, &off);
	if (error)
		goto out;
	error = wapbl_write_inodes(wl, &off);
	if (error)
		goto out;

	reserved = 0;
	if (wl->wl_inohashcnt)
		reserved = wapbl_transaction_inodes_len(wl);

	head = wl->wl_head;
	tail = wl->wl_tail;

	wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
	    &head, &tail);

	KASSERTMSG(head == off,
	    "lost head! head=%"PRIdMAX" tail=%" PRIdMAX
	    " off=%"PRIdMAX" flush=%zu",
	    (intmax_t)head, (intmax_t)tail, (intmax_t)off,
	    flushsize);

	/* Opportunistically move the tail forward if we can */
	mutex_enter(&wl->wl_mtx);
	delta = wl->wl_reclaimable_bytes;
	mutex_exit(&wl->wl_mtx);
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
	    &head, &tail);

	error = wapbl_write_commit(wl, head, tail);
	if (error)
		goto out;

	we = pool_get(&wapbl_entry_pool, PR_WAITOK);

#ifdef WAPBL_DEBUG_BUFBYTES
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
		 " unsynced=%zu"
		 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
		 "inodes=%d\n",
		 curproc->p_pid, curlwp->l_lid, flushsize, delta,
		 wapbl_space_used(wl->wl_circ_size, head, tail),
		 wl->wl_unsynced_bufbytes, wl->wl_bufcount,
		 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
		 wl->wl_inohashcnt));
#else
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
		 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
		 "inodes=%d\n",
		 curproc->p_pid, curlwp->l_lid, flushsize, delta,
		 wapbl_space_used(wl->wl_circ_size, head, tail),
		 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
		 wl->wl_dealloccnt, wl->wl_inohashcnt));
#endif


	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);

	wl->wl_reserved_bytes = reserved;
	wl->wl_head = head;
	wl->wl_tail = tail;
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	KDASSERT(wl->wl_dealloccnt == 0);
#ifdef WAPBL_DEBUG_BUFBYTES
	wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
#endif

	we->we_wapbl = wl;
	we->we_bufcount = wl->wl_bufcount;
#ifdef WAPBL_DEBUG_BUFBYTES
	we->we_unsynced_bufbytes = wl->wl_bufbytes;
#endif
	we->we_reclaimable_bytes = flushsize;
	we->we_error = 0;
	SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);

	/*
	 * This flushes bufs in reverse order from how they were queued.
	 * It shouldn't matter, but if we care we could use a TAILQ instead.
	 * XXX Note they will get put on the lru queue when they flush
	 * so we might actually want to change this to preserve order.
	 */
1751 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
1752 if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1753 continue;
1754 }
1755 bp->b_iodone = wapbl_biodone;
1756 bp->b_private = we;
1757 bremfree(bp);
1758 wapbl_remove_buf_locked(wl, bp);
1759 mutex_exit(&wl->wl_mtx);
1760 mutex_exit(&bufcache_lock);
1761 bawrite(bp);
1762 mutex_enter(&bufcache_lock);
1763 mutex_enter(&wl->wl_mtx);
1764 }
1765 mutex_exit(&wl->wl_mtx);
1766 mutex_exit(&bufcache_lock);
1767
1768 #if 0
1769 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1770 ("wapbl_flush thread %d.%d done flushing entries...\n",
1771 curproc->p_pid, curlwp->l_lid));
1772 #endif
1773
1774 wait_out:
1775
1776 /*
1777 * If the waitfor flag is set, don't return until everything is
1778 * fully flushed and the on disk log is empty.
1779 */
1780 if (waitfor) {
1781 error = wapbl_truncate(wl, wl->wl_circ_size -
1782 wl->wl_reserved_bytes);
1783 }
1784
1785 out:
1786 if (error) {
1787 wl->wl_flush_abort(wl->wl_mount,
1788 TAILQ_FIRST(&wl->wl_dealloclist));
1789 }
1790
1791 #ifdef WAPBL_DEBUG_PRINT
1792 if (error) {
1793 pid_t pid = -1;
1794 lwpid_t lid = -1;
1795 if (curproc)
1796 pid = curproc->p_pid;
1797 if (curlwp)
1798 lid = curlwp->l_lid;
1799 mutex_enter(&wl->wl_mtx);
1800 #ifdef WAPBL_DEBUG_BUFBYTES
1801 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1802 ("wapbl_flush: thread %d.%d aborted flush: "
1803 "error = %d\n"
1804 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1805 "deallocs=%d inodes=%d\n"
1806 "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1807 "unsynced=%zu\n",
1808 pid, lid, error, wl->wl_bufcount,
1809 wl->wl_bufbytes, wl->wl_bcount,
1810 wl->wl_dealloccnt, wl->wl_inohashcnt,
1811 wl->wl_error_count, wl->wl_reclaimable_bytes,
1812 wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1813 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1814 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1815 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1816 "error = %d, unsynced = %zu\n",
1817 we->we_bufcount, we->we_reclaimable_bytes,
1818 we->we_error, we->we_unsynced_bufbytes));
1819 }
1820 #else
1821 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1822 ("wapbl_flush: thread %d.%d aborted flush: "
1823 "error = %d\n"
1824 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1825 "deallocs=%d inodes=%d\n"
1826 "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
1827 pid, lid, error, wl->wl_bufcount,
1828 wl->wl_bufbytes, wl->wl_bcount,
1829 wl->wl_dealloccnt, wl->wl_inohashcnt,
1830 wl->wl_error_count, wl->wl_reclaimable_bytes,
1831 wl->wl_reserved_bytes));
1832 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1833 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1834 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1835 "error = %d\n", we->we_bufcount,
1836 we->we_reclaimable_bytes, we->we_error));
1837 }
1838 #endif
1839 mutex_exit(&wl->wl_mtx);
1840 }
1841 #endif
1842
1843 rw_exit(&wl->wl_rwlock);
1844 return error;
1845 }
1846
1847 /****************************************************************/
1848
1849 void
1850 wapbl_jlock_assert(struct wapbl *wl)
1851 {
1852
1853 KASSERT(rw_lock_held(&wl->wl_rwlock));
1854 }
1855
1856 void
1857 wapbl_junlock_assert(struct wapbl *wl)
1858 {
1859
1860 KASSERT(!rw_write_held(&wl->wl_rwlock));
1861 }
1862
1863 /****************************************************************/
1864
1865 /* locks missing */
1866 void
1867 wapbl_print(struct wapbl *wl,
1868 int full,
1869 void (*pr)(const char *, ...))
1870 {
1871 struct buf *bp;
1872 struct wapbl_entry *we;
1873 (*pr)("wapbl %p", wl);
1874 (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
1875 wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
1876 (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
1877 wl->wl_circ_size, wl->wl_circ_off,
1878 (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
	(*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
	    wl->wl_fs_dev_bshift, wl->wl_log_dev_bshift);
1881 #ifdef WAPBL_DEBUG_BUFBYTES
1882 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1883 "reserved = %zu errcnt = %d unsynced = %zu\n",
1884 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1885 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1886 wl->wl_error_count, wl->wl_unsynced_bufbytes);
1887 #else
1888 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1889 "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
1890 wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1891 wl->wl_error_count);
1892 #endif
1893 (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
1894 wl->wl_dealloccnt, wl->wl_dealloclim);
1895 (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
1896 wl->wl_inohashcnt, wl->wl_inohashmask);
1897 (*pr)("entries:\n");
1898 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1899 #ifdef WAPBL_DEBUG_BUFBYTES
1900 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
1901 "unsynced = %zu\n",
1902 we->we_bufcount, we->we_reclaimable_bytes,
1903 we->we_error, we->we_unsynced_bufbytes);
1904 #else
1905 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
1906 we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
1907 #endif
1908 }
1909 if (full) {
1910 int cnt = 0;
1911 (*pr)("bufs =");
1912 LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
1913 if (!LIST_NEXT(bp, b_wapbllist)) {
1914 (*pr)(" %p", bp);
1915 } else if ((++cnt % 6) == 0) {
1916 (*pr)(" %p,\n\t", bp);
1917 } else {
1918 (*pr)(" %p,", bp);
1919 }
1920 }
1921 (*pr)("\n");
1922
1923 (*pr)("dealloced blks = ");
1924 {
1925 struct wapbl_dealloc *wd;
1926 cnt = 0;
1927 TAILQ_FOREACH(wd, &wl->wl_dealloclist, wd_entries) {
1928 (*pr)(" %"PRId64":%d,",
1929 wd->wd_blkno,
1930 wd->wd_len);
1931 if ((++cnt % 4) == 0) {
1932 (*pr)("\n\t");
1933 }
1934 }
1935 }
1936 (*pr)("\n");
1937
1938 (*pr)("registered inodes = ");
1939 {
1940 int i;
1941 cnt = 0;
1942 for (i = 0; i <= wl->wl_inohashmask; i++) {
1943 struct wapbl_ino_head *wih;
1944 struct wapbl_ino *wi;
1945
1946 wih = &wl->wl_inohash[i];
1947 LIST_FOREACH(wi, wih, wi_hash) {
1948 if (wi->wi_ino == 0)
1949 continue;
1950 (*pr)(" %"PRIu64"/0%06"PRIo32",",
1951 wi->wi_ino, wi->wi_mode);
1952 if ((++cnt % 4) == 0) {
1953 (*pr)("\n\t");
1954 }
1955 }
1956 }
1957 (*pr)("\n");
1958 }
1959 }
1960 }
1961
1962 #if defined(WAPBL_DEBUG) || defined(DDB)
1963 void
1964 wapbl_dump(struct wapbl *wl)
1965 {
1966 #if defined(WAPBL_DEBUG)
1967 if (!wl)
1968 wl = wapbl_debug_wl;
1969 #endif
1970 if (!wl)
1971 return;
1972 wapbl_print(wl, 1, printf);
1973 }
1974 #endif
1975
1976 /****************************************************************/
1977
1978 int
1979 wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len, bool force,
1980 void **cookiep)
1981 {
1982 struct wapbl_dealloc *wd;
1983 int error = 0;
1984
1985 wapbl_jlock_assert(wl);
1986
1987 mutex_enter(&wl->wl_mtx);
1988
1989 if (__predict_false(wl->wl_dealloccnt >= wl->wl_dealloclim)) {
1990 if (!force) {
1991 error = EAGAIN;
1992 goto out;
1993 }
1994
1995 /*
		 * Forced registration can only be used when:
		 * 1) the caller can't cope with failure, and
		 * 2) the path can only be triggered a bounded, small
		 *    number of times per transaction.
		 * If these conditions are not fulfilled and the path is
		 * triggered many times, the maximum transaction size
		 * could be overflowed, causing a panic later.
2003 */
2004 printf("%s: forced dealloc registration over limit: %d >= %d\n",
2005 wl->wl_mount->mnt_stat.f_mntonname,
2006 wl->wl_dealloccnt, wl->wl_dealloclim);
2007 }
2008
2009 wl->wl_dealloccnt++;
2010 mutex_exit(&wl->wl_mtx);
2011
2012 wd = pool_get(&wapbl_dealloc_pool, PR_WAITOK);
2013 wd->wd_blkno = blk;
2014 wd->wd_len = len;
2015
2016 mutex_enter(&wl->wl_mtx);
2017 TAILQ_INSERT_TAIL(&wl->wl_dealloclist, wd, wd_entries);
2018
2019 if (cookiep)
2020 *cookiep = wd;
2021
2022 out:
2023 mutex_exit(&wl->wl_mtx);
2024
2025 WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
2026 ("wapbl_register_deallocation: blk=%"PRId64" len=%d error=%d\n",
2027 blk, len, error));
2028
2029 return error;
2030 }
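
/*
 * Usage sketch (editorial, not from the original source): a caller
 * such as a filesystem truncate path would register each freed
 * extent, and can later undo a registration through the returned
 * cookie.  The names blkno/size below are hypothetical:
 *
 *	void *cookie;
 *	int error = wapbl_register_deallocation(wl, blkno, size,
 *	    false, &cookie);
 *	if (error == EAGAIN)
 *		... flush the journal and retry, or pass force=true ...
 *	...
 *	wapbl_unregister_deallocation(wl, cookie);
 */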
2031
2032 static void
2033 wapbl_deallocation_free(struct wapbl *wl, struct wapbl_dealloc *wd,
2034 bool locked)
2035 {
2036 KASSERT(!locked
2037 || rw_lock_held(&wl->wl_rwlock) || mutex_owned(&wl->wl_mtx));
2038
2039 if (!locked)
2040 mutex_enter(&wl->wl_mtx);
2041
2042 TAILQ_REMOVE(&wl->wl_dealloclist, wd, wd_entries);
2043 wl->wl_dealloccnt--;
2044
2045 if (!locked)
2046 mutex_exit(&wl->wl_mtx);
2047
2048 pool_put(&wapbl_dealloc_pool, wd);
2049 }
2050
2051 void
2052 wapbl_unregister_deallocation(struct wapbl *wl, void *cookie)
2053 {
2054 KASSERT(cookie != NULL);
2055 wapbl_deallocation_free(wl, cookie, false);
2056 }
2057
2058 /****************************************************************/
2059
2060 static void
2061 wapbl_inodetrk_init(struct wapbl *wl, u_int size)
2062 {
2063
2064 wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
2065 if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
2066 pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
2067 "wapblinopl", &pool_allocator_nointr, IPL_NONE);
2068 }
2069 }
2070
2071 static void
2072 wapbl_inodetrk_free(struct wapbl *wl)
2073 {
2074
2075 /* XXX this KASSERT needs locking/mutex analysis */
2076 KASSERT(wl->wl_inohashcnt == 0);
2077 hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
2078 if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
2079 pool_destroy(&wapbl_ino_pool);
2080 }
2081 }
2082
2083 static struct wapbl_ino *
2084 wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
2085 {
2086 struct wapbl_ino_head *wih;
2087 struct wapbl_ino *wi;
2088
2089 KASSERT(mutex_owned(&wl->wl_mtx));
2090
2091 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2092 LIST_FOREACH(wi, wih, wi_hash) {
2093 if (ino == wi->wi_ino)
2094 return wi;
2095 }
	return NULL;
2097 }
2098
2099 void
2100 wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2101 {
2102 struct wapbl_ino_head *wih;
2103 struct wapbl_ino *wi;
2104
2105 wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
2106
2107 mutex_enter(&wl->wl_mtx);
2108 if (wapbl_inodetrk_get(wl, ino) == NULL) {
2109 wi->wi_ino = ino;
2110 wi->wi_mode = mode;
2111 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2112 LIST_INSERT_HEAD(wih, wi, wi_hash);
2113 wl->wl_inohashcnt++;
2114 WAPBL_PRINTF(WAPBL_PRINT_INODE,
2115 ("wapbl_register_inode: ino=%"PRId64"\n", ino));
2116 mutex_exit(&wl->wl_mtx);
2117 } else {
2118 mutex_exit(&wl->wl_mtx);
2119 pool_put(&wapbl_ino_pool, wi);
2120 }
2121 }
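
/*
 * Design note (editorial): the pool entry above is allocated with
 * PR_WAITOK before wl_mtx is taken, so the thread never sleeps in
 * the allocator while holding the mutex; if the inode turns out to
 * be registered already, the preallocated entry is simply returned
 * to the pool.
 */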
2122
2123 void
2124 wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2125 {
2126 struct wapbl_ino *wi;
2127
2128 mutex_enter(&wl->wl_mtx);
2129 wi = wapbl_inodetrk_get(wl, ino);
2130 if (wi) {
2131 WAPBL_PRINTF(WAPBL_PRINT_INODE,
2132 ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
2133 KASSERT(wl->wl_inohashcnt > 0);
2134 wl->wl_inohashcnt--;
2135 LIST_REMOVE(wi, wi_hash);
2136 mutex_exit(&wl->wl_mtx);
2137
2138 pool_put(&wapbl_ino_pool, wi);
2139 } else {
2140 mutex_exit(&wl->wl_mtx);
2141 }
2142 }
2143
2144 /****************************************************************/
2145
2146 /*
2147 * wapbl_transaction_inodes_len(wl)
2148 *
2149 * Calculate the number of bytes required for inode registration
2150 * log records in wl.
2151 */
2152 static inline size_t
2153 wapbl_transaction_inodes_len(struct wapbl *wl)
2154 {
2155 int blocklen = 1<<wl->wl_log_dev_bshift;
2156 int iph;
2157
	/* Calculate the number of inodes described in an inodelist header */
2159 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2160 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2161
2162 KASSERT(iph > 0);
2163
2164 return MAX(1, howmany(wl->wl_inohashcnt, iph)) * blocklen;
2165 }
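
/*
 * Worked example (editorial, with hypothetical sizes): for a
 * 512-byte log block, a 16-byte wapbl_wc_inodelist header and
 * 12-byte wc_inodes[] entries, iph = (512 - 16) / 12 = 41 inodes
 * per block.  Registering 100 inodes then costs
 * howmany(100, 41) = 3 blocks = 1536 bytes; with no inodes at all,
 * MAX(1, ...) still reserves one block for an empty inodelist
 * record.
 */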
2166
2167
2168 /*
2169 * wapbl_transaction_len(wl)
2170 *
2171 * Calculate number of bytes required for all log records in wl.
2172 */
2173 static size_t
2174 wapbl_transaction_len(struct wapbl *wl)
2175 {
2176 int blocklen = 1<<wl->wl_log_dev_bshift;
2177 size_t len;
2178
2179 /* Calculate number of blocks described in a blocklist header */
2180 len = wl->wl_bcount;
2181 len += howmany(wl->wl_bufcount, wl->wl_brperjblock) * blocklen;
2182 len += howmany(wl->wl_dealloccnt, wl->wl_brperjblock) * blocklen;
2183 len += wapbl_transaction_inodes_len(wl);
2184
2185 return len;
2186 }
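
/*
 * Worked example (editorial, hypothetical numbers): with
 * blocklen = 512 and wl_brperjblock = 62, a transaction holding
 * 100 buffers totalling wl_bcount = 200 KiB of data and 10 pending
 * deallocations needs 200 KiB for the data itself, plus
 * howmany(100, 62) = 2 blocklist header blocks, plus
 * howmany(10, 62) = 1 revocation header block, plus
 * wapbl_transaction_inodes_len() for the inode records.
 */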
2187
2188 /*
2189 * wapbl_cache_sync(wl, msg)
2190 *
2191 * Issue DIOCCACHESYNC to wl->wl_devvp.
2192 *
2193 * If sysctl(vfs.wapbl.verbose_commit) >= 2, print a message
2194 * including msg about the duration of the cache sync.
2195 */
2196 static int
2197 wapbl_cache_sync(struct wapbl *wl, const char *msg)
2198 {
2199 const bool verbose = wapbl_verbose_commit >= 2;
2200 struct bintime start_time;
2201 int force = 1;
2202 int error;
2203
2204 if (!wapbl_flush_disk_cache) {
2205 return 0;
2206 }
2207 if (verbose) {
2208 bintime(&start_time);
2209 }
2210 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force,
2211 FWRITE, FSCRED);
2212 if (error) {
2213 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2214 ("wapbl_cache_sync: DIOCCACHESYNC on dev 0x%jx "
2215 "returned %d\n", (uintmax_t)wl->wl_devvp->v_rdev, error));
2216 }
2217 if (verbose) {
2218 struct bintime d;
2219 struct timespec ts;
2220
2221 bintime(&d);
2222 bintime_sub(&d, &start_time);
2223 bintime2timespec(&d, &ts);
2224 printf("wapbl_cache_sync: %s: dev 0x%jx %ju.%09lu\n",
2225 msg, (uintmax_t)wl->wl_devvp->v_rdev,
2226 (uintmax_t)ts.tv_sec, ts.tv_nsec);
2227 }
2228
2229 wl->wl_ev_cacheflush.ev_count++;
2230
2231 return error;
2232 }
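
/*
 * Tuning note (editorial; the sysctl names assume the vfs.wapbl
 * subtree set up elsewhere in this file): setting
 * vfs.wapbl.flush_disk_cache=0 skips the DIOCCACHESYNC entirely,
 * trading crash safety on drives with volatile write caches for
 * speed, while vfs.wapbl.verbose_commit=2 prints the duration of
 * every cache sync as measured above.
 */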
2233
2234 /*
2235 * wapbl_write_commit(wl, head, tail)
2236 *
2237 * Issue a disk cache sync to wait for all pending writes to the
2238 * log to complete, and then synchronously commit the current
2239 * circular queue head and tail to the log, in the next of two
2240 * locations for commit headers on disk.
2241 *
2242 * Increment the generation number. If the generation number
2243 * rolls over to zero, then a subsequent commit would appear to
2244 * have an older generation than this one -- in that case, issue a
2245 * duplicate commit to avoid this.
2246 *
2247 * => Caller must have exclusive access to wl, either by holding
2248 * wl->wl_rwlock for writer or by being wapbl_start before anyone
2249 * else has seen wl.
2250 */
2251 static int
2252 wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
2253 {
2254 struct wapbl_wc_header *wc = wl->wl_wc_header;
2255 struct timespec ts;
2256 int error;
2257 daddr_t pbn;
2258
2259 error = wapbl_buffered_flush(wl);
2260 if (error)
2261 return error;
2262 /*
	 * Flush the disk cache to ensure that the blocks we've written
	 * are actually on stable storage before the commit header is
	 * written.
	 *
	 * XXX Ideally we would calculate a checksum here; instead we
	 * do this cache sync for now.
2267 */
2268 wapbl_cache_sync(wl, "1");
2269
2270 wc->wc_head = head;
2271 wc->wc_tail = tail;
2272 wc->wc_checksum = 0;
2273 wc->wc_version = 1;
2274 getnanotime(&ts);
2275 wc->wc_time = ts.tv_sec;
2276 wc->wc_timensec = ts.tv_nsec;
2277
2278 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
	    ("wapbl_write_commit: head = %"PRIdMAX" tail = %"PRIdMAX"\n",
2280 (intmax_t)head, (intmax_t)tail));
2281
2282 /*
2283 * write the commit header.
2284 *
2285 * XXX if generation will rollover, then first zero
2286 * over second commit header before trying to write both headers.
2287 */
2288
2289 pbn = wl->wl_logpbn + (wc->wc_generation % 2);
2290 #ifdef _KERNEL
2291 pbn = btodb(pbn << wc->wc_log_dev_bshift);
2292 #endif
2293 error = wapbl_buffered_write(wc, wc->wc_len, wl, pbn);
2294 if (error)
2295 return error;
2296 error = wapbl_buffered_flush(wl);
2297 if (error)
2298 return error;
2299
2300 /*
2301 * flush disk cache to ensure that the commit header is actually
2302 * written before meta data blocks.
2303 */
2304 wapbl_cache_sync(wl, "2");
2305
2306 /*
2307 * If the generation number was zero, write it out a second time.
	 * This handles initialization and generation number rollover.
2309 */
2310 if (wc->wc_generation++ == 0) {
2311 error = wapbl_write_commit(wl, head, tail);
2312 /*
		 * This panic could be removed if we did the zeroing
		 * mentioned above and were certain to roll the
		 * generation number back on failure.
2316 */
2317 if (error)
2318 panic("wapbl_write_commit: error writing duplicate "
2319 "log header: %d", error);
2320 }
2321
2322 wl->wl_ev_commit.ev_count++;
2323
2324 return 0;
2325 }
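
/*
 * Layout sketch (editorial) of the commit header ping-pong
 * implemented above: two header slots sit at the front of the log
 * and commits alternate between them by generation parity, so a
 * torn header write can only damage the slot being written while
 * the other slot still holds the previous valid commit:
 *
 *	log block 0: commit headers with even generations
 *	log block 1: commit headers with odd generations
 *
 * Replay (wapbl_replay_start) reads both slots and trusts the one
 * with the larger generation number.
 */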
2326
2327 /*
2328 * wapbl_write_blocks(wl, offp)
2329 *
2330 * Write all pending physical blocks in the current transaction
2331 * from wapbl_add_buf to the log on disk, adding to the circular
2332 * queue head at byte offset *offp, and returning the new head's
2333 * byte offset in *offp.
2334 */
2335 static int
2336 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
2337 {
2338 struct wapbl_wc_blocklist *wc =
2339 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2340 int blocklen = 1<<wl->wl_log_dev_bshift;
2341 struct buf *bp;
2342 off_t off = *offp;
2343 int error;
2344 size_t padding;
2345
2346 KASSERT(rw_write_held(&wl->wl_rwlock));
2347
2348 bp = LIST_FIRST(&wl->wl_bufs);
2349
2350 while (bp) {
2351 int cnt;
2352 struct buf *obp = bp;
2353
2354 KASSERT(bp->b_flags & B_LOCKED);
2355
2356 wc->wc_type = WAPBL_WC_BLOCKS;
2357 wc->wc_len = blocklen;
2358 wc->wc_blkcount = 0;
2359 while (bp && (wc->wc_blkcount < wl->wl_brperjblock)) {
2360 /*
2361 * Make sure all the physical block numbers are up to
2362 * date. If this is not always true on a given
2363 * filesystem, then VOP_BMAP must be called. We
2364 * could call VOP_BMAP here, or else in the filesystem
2365 * specific flush callback, although neither of those
2366 * solutions allow us to take the vnode lock. If a
2367 * filesystem requires that we must take the vnode lock
2368 * to call VOP_BMAP, then we can probably do it in
2369 * bwrite when the vnode lock should already be held
2370 * by the invoking code.
2371 */
2372 KASSERT((bp->b_vp->v_type == VBLK) ||
2373 (bp->b_blkno != bp->b_lblkno));
2374 KASSERT(bp->b_blkno > 0);
2375
2376 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
2377 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
2378 wc->wc_len += bp->b_bcount;
2379 wc->wc_blkcount++;
2380 bp = LIST_NEXT(bp, b_wapbllist);
2381 }
2382 if (wc->wc_len % blocklen != 0) {
2383 padding = blocklen - wc->wc_len % blocklen;
2384 wc->wc_len += padding;
2385 } else {
2386 padding = 0;
2387 }
2388
2389 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2390 ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
2391 wc->wc_len, padding, (intmax_t)off));
2392
2393 error = wapbl_circ_write(wl, wc, blocklen, &off);
2394 if (error)
2395 return error;
2396 bp = obp;
2397 cnt = 0;
2398 while (bp && (cnt++ < wl->wl_brperjblock)) {
2399 error = wapbl_circ_write(wl, bp->b_data,
2400 bp->b_bcount, &off);
2401 if (error)
2402 return error;
2403 bp = LIST_NEXT(bp, b_wapbllist);
2404 }
2405 if (padding) {
2406 void *zero;
2407
2408 zero = wapbl_alloc(padding);
2409 memset(zero, 0, padding);
2410 error = wapbl_circ_write(wl, zero, padding, &off);
2411 wapbl_free(zero, padding);
2412 if (error)
2413 return error;
2414 }
2415 }
2416 *offp = off;
2417 return 0;
2418 }
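
/*
 * On-disk shape (editorial) of one record emitted per iteration of
 * the outer loop above, using hypothetical sizes -- blocklen = 512
 * and two buffers of 300 and 700 bytes:
 *
 *	[ 512-byte header block: wc_type = WAPBL_WC_BLOCKS,
 *	  wc_blkcount = 2, wc_len = 512 + 300 + 700 + 24 = 1536 ]
 *	[ 300 bytes of buf 0 ][ 700 bytes of buf 1 ][ 24 bytes of
 *	  zero padding up to the next blocklen boundary ]
 */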
2419
2420 /*
2421 * wapbl_write_revocations(wl, offp)
2422 *
2423 * Write all pending deallocations in the current transaction from
2424 * wapbl_register_deallocation to the log on disk, adding to the
2425 * circular queue's head at byte offset *offp, and returning the
2426 * new head's byte offset in *offp.
2427 */
2428 static int
2429 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
2430 {
2431 struct wapbl_wc_blocklist *wc =
2432 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2433 struct wapbl_dealloc *wd, *lwd;
2434 int blocklen = 1<<wl->wl_log_dev_bshift;
2435 off_t off = *offp;
2436 int error;
2437
2438 KASSERT(rw_write_held(&wl->wl_rwlock));
2439
2440 if (wl->wl_dealloccnt == 0)
2441 return 0;
2442
2443 while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2444 wc->wc_type = WAPBL_WC_REVOCATIONS;
2445 wc->wc_len = blocklen;
2446 wc->wc_blkcount = 0;
2447 while (wd && (wc->wc_blkcount < wl->wl_brperjblock)) {
2448 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2449 wd->wd_blkno;
2450 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2451 wd->wd_len;
2452 wc->wc_blkcount++;
2453
2454 wd = TAILQ_NEXT(wd, wd_entries);
2455 }
2456 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2457 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2458 wc->wc_len, (intmax_t)off));
2459 error = wapbl_circ_write(wl, wc, blocklen, &off);
2460 if (error)
2461 return error;
2462
2463 /* free all successfully written deallocs */
2464 lwd = wd;
2465 while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2466 if (wd == lwd)
2467 break;
2468 wapbl_deallocation_free(wl, wd, true);
2469 }
2470 }
2471 *offp = off;
2472 return 0;
2473 }
2474
2475 /*
2476 * wapbl_write_inodes(wl, offp)
2477 *
2478 * Write all pending inode allocations in the current transaction
2479 * from wapbl_register_inode to the log on disk, adding to the
2480 * circular queue's head at byte offset *offp and returning the
2481 * new head's byte offset in *offp.
2482 */
2483 static int
2484 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2485 {
2486 struct wapbl_wc_inodelist *wc =
2487 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2488 int i;
2489 int blocklen = 1 << wl->wl_log_dev_bshift;
2490 off_t off = *offp;
2491 int error;
2492
2493 struct wapbl_ino_head *wih;
2494 struct wapbl_ino *wi;
2495 int iph;
2496
2497 KASSERT(rw_write_held(&wl->wl_rwlock));
2498
2499 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2500 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2501
2502 i = 0;
2503 wih = &wl->wl_inohash[0];
	wi = NULL;
2505 do {
2506 wc->wc_type = WAPBL_WC_INODES;
2507 wc->wc_len = blocklen;
2508 wc->wc_inocnt = 0;
2509 wc->wc_clear = (i == 0);
2510 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2511 while (!wi) {
2512 KASSERT((wih - &wl->wl_inohash[0])
2513 <= wl->wl_inohashmask);
2514 wi = LIST_FIRST(wih++);
2515 }
2516 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2517 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2518 wc->wc_inocnt++;
2519 i++;
2520 wi = LIST_NEXT(wi, wi_hash);
2521 }
2522 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2523 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2524 wc->wc_len, (intmax_t)off));
2525 error = wapbl_circ_write(wl, wc, blocklen, &off);
2526 if (error)
2527 return error;
2528 } while (i < wl->wl_inohashcnt);
2529
2530 *offp = off;
2531 return 0;
2532 }
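
/*
 * Replay interaction (editorial note derived from the code):
 * wc_clear is set only on the first inodelist record of a dump, so
 * wapbl_replay_process_inodes() below throws away any inode list
 * accumulated from an earlier pass exactly when a fresh dump
 * begins, and appends for the continuation records.
 */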
2533
2534 #endif /* _KERNEL */
2535
2536 /****************************************************************/
2537
2538 struct wapbl_blk {
2539 LIST_ENTRY(wapbl_blk) wb_hash;
2540 daddr_t wb_blk;
2541 off_t wb_off; /* Offset of this block in the log */
2542 };
2543 #define WAPBL_BLKPOOL_MIN 83
2544
2545 static void
2546 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2547 {
2548 if (size < WAPBL_BLKPOOL_MIN)
2549 size = WAPBL_BLKPOOL_MIN;
2550 KASSERT(wr->wr_blkhash == 0);
2551 #ifdef _KERNEL
2552 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2553 #else /* ! _KERNEL */
2554 /* Manually implement hashinit */
2555 {
2556 unsigned long i, hashsize;
2557 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2558 continue;
2559 wr->wr_blkhash = wapbl_alloc(hashsize * sizeof(*wr->wr_blkhash));
2560 for (i = 0; i < hashsize; i++)
2561 LIST_INIT(&wr->wr_blkhash[i]);
2562 wr->wr_blkhashmask = hashsize - 1;
2563 }
2564 #endif /* ! _KERNEL */
2565 }
2566
2567 static void
2568 wapbl_blkhash_free(struct wapbl_replay *wr)
2569 {
2570 KASSERT(wr->wr_blkhashcnt == 0);
2571 #ifdef _KERNEL
2572 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2573 #else /* ! _KERNEL */
2574 wapbl_free(wr->wr_blkhash,
2575 (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
2576 #endif /* ! _KERNEL */
2577 }
2578
2579 static struct wapbl_blk *
2580 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2581 {
2582 struct wapbl_blk_head *wbh;
2583 struct wapbl_blk *wb;
2584 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2585 LIST_FOREACH(wb, wbh, wb_hash) {
2586 if (blk == wb->wb_blk)
2587 return wb;
2588 }
	return NULL;
2590 }
2591
2592 static void
2593 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2594 {
2595 struct wapbl_blk_head *wbh;
2596 struct wapbl_blk *wb;
2597 wb = wapbl_blkhash_get(wr, blk);
2598 if (wb) {
2599 KASSERT(wb->wb_blk == blk);
2600 wb->wb_off = off;
2601 } else {
2602 wb = wapbl_alloc(sizeof(*wb));
2603 wb->wb_blk = blk;
2604 wb->wb_off = off;
2605 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2606 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2607 wr->wr_blkhashcnt++;
2608 }
2609 }
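
/*
 * Note (editorial): a duplicate insert simply re-points the
 * existing entry at the newer log offset, so after scanning the
 * log from tail to head the hashtable maps each block to its most
 * recently journalled copy.
 */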
2610
2611 static void
2612 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2613 {
2614 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2615 if (wb) {
2616 KASSERT(wr->wr_blkhashcnt > 0);
2617 wr->wr_blkhashcnt--;
2618 LIST_REMOVE(wb, wb_hash);
2619 wapbl_free(wb, sizeof(*wb));
2620 }
2621 }
2622
2623 static void
2624 wapbl_blkhash_clear(struct wapbl_replay *wr)
2625 {
2626 unsigned long i;
2627 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2628 struct wapbl_blk *wb;
2629
2630 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2631 KASSERT(wr->wr_blkhashcnt > 0);
2632 wr->wr_blkhashcnt--;
2633 LIST_REMOVE(wb, wb_hash);
2634 wapbl_free(wb, sizeof(*wb));
2635 }
2636 }
2637 KASSERT(wr->wr_blkhashcnt == 0);
2638 }
2639
2640 /****************************************************************/
2641
2642 /*
2643 * wapbl_circ_read(wr, data, len, offp)
2644 *
2645 * Read len bytes into data from the circular queue of wr,
2646 * starting at the linear byte offset *offp, and returning the new
2647 * linear byte offset in *offp.
2648 *
2649 * If the starting linear byte offset precedes wr->wr_circ_off,
2650 * the read instead begins at wr->wr_circ_off. XXX WTF? This
2651 * should be a KASSERT, not a conditional.
2652 */
2653 static int
2654 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2655 {
2656 size_t slen;
2657 off_t off = *offp;
2658 int error;
2659 daddr_t pbn;
2660
2661 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2662 wr->wr_log_dev_bshift) == len);
2663
2664 if (off < wr->wr_circ_off)
2665 off = wr->wr_circ_off;
2666 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2667 if (slen < len) {
2668 pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2669 #ifdef _KERNEL
2670 pbn = btodb(pbn << wr->wr_log_dev_bshift);
2671 #endif
2672 error = wapbl_read(data, slen, wr->wr_devvp, pbn);
2673 if (error)
2674 return error;
2675 data = (uint8_t *)data + slen;
2676 len -= slen;
2677 off = wr->wr_circ_off;
2678 }
2679 pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2680 #ifdef _KERNEL
2681 pbn = btodb(pbn << wr->wr_log_dev_bshift);
2682 #endif
2683 error = wapbl_read(data, len, wr->wr_devvp, pbn);
2684 if (error)
2685 return error;
2686 off += len;
2687 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2688 off = wr->wr_circ_off;
2689 *offp = off;
2690 return 0;
2691 }
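
/*
 * Worked wraparound example (editorial, hypothetical geometry):
 * with wr_circ_off = 1024 and wr_circ_size = 8192, a read of
 * len = 1024 at *offp = 8704 has only slen = 1024 + 8192 - 8704 =
 * 512 bytes left before the end of the circle, so it is split:
 * 512 bytes from offset 8704, then 512 bytes from wr_circ_off, and
 * *offp returns as 1024 + 512 = 1536.
 */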
2692
2693 /*
2694 * wapbl_circ_advance(wr, len, offp)
2695 *
2696 * Compute the linear byte offset of the circular queue of wr that
2697 * is len bytes past *offp, and store it in *offp.
2698 *
2699 * This is as if wapbl_circ_read, but without actually reading
2700 * anything.
2701 *
2702 * If the starting linear byte offset precedes wr->wr_circ_off, it
2703 * is taken to be wr->wr_circ_off instead. XXX WTF? This should
2704 * be a KASSERT, not a conditional.
2705 */
2706 static void
2707 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2708 {
2709 size_t slen;
2710 off_t off = *offp;
2711
2712 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2713 wr->wr_log_dev_bshift) == len);
2714
2715 if (off < wr->wr_circ_off)
2716 off = wr->wr_circ_off;
2717 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2718 if (slen < len) {
2719 len -= slen;
2720 off = wr->wr_circ_off;
2721 }
2722 off += len;
2723 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2724 off = wr->wr_circ_off;
2725 *offp = off;
2726 }
2727
2728 /****************************************************************/
2729
2730 int
2731 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2732 daddr_t off, size_t count, size_t blksize)
2733 {
2734 struct wapbl_replay *wr;
2735 int error;
2736 struct vnode *devvp;
2737 daddr_t logpbn;
2738 uint8_t *scratch;
2739 struct wapbl_wc_header *wch;
2740 struct wapbl_wc_header *wch2;
2741 /* Use this until we read the actual log header */
2742 int log_dev_bshift = ilog2(blksize);
2743 size_t used;
2744 daddr_t pbn;
2745
2746 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2747 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2748 vp, off, count, blksize));
2749
2750 if (off < 0)
2751 return EINVAL;
2752
2753 if (blksize < DEV_BSIZE)
2754 return EINVAL;
2755 if (blksize % DEV_BSIZE)
2756 return EINVAL;
2757
2758 #ifdef _KERNEL
2759 #if 0
2760 /* XXX vp->v_size isn't reliably set for VBLK devices,
2761 * especially root. However, we might still want to verify
2762 * that the full load is readable */
2763 if ((off + count) * blksize > vp->v_size)
2764 return EINVAL;
2765 #endif
2766 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2767 return error;
2768 }
2769 #else /* ! _KERNEL */
2770 devvp = vp;
2771 logpbn = off;
2772 #endif /* ! _KERNEL */
2773
2774 scratch = wapbl_alloc(MAXBSIZE);
2775
2776 pbn = logpbn;
2777 #ifdef _KERNEL
2778 pbn = btodb(pbn << log_dev_bshift);
2779 #endif
2780 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, pbn);
2781 if (error)
2782 goto errout;
2783
2784 wch = (struct wapbl_wc_header *)scratch;
2785 wch2 =
2786 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2787 /* XXX verify checksums and magic numbers */
2788 if (wch->wc_type != WAPBL_WC_HEADER) {
2789 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2790 error = EFTYPE;
2791 goto errout;
2792 }
2793
2794 if (wch2->wc_generation > wch->wc_generation)
2795 wch = wch2;
2796
2797 wr = wapbl_calloc(1, sizeof(*wr));
2798
2799 wr->wr_logvp = vp;
2800 wr->wr_devvp = devvp;
2801 wr->wr_logpbn = logpbn;
2802
2803 wr->wr_scratch = scratch;
2804
2805 wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
2806 wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
2807 wr->wr_circ_off = wch->wc_circ_off;
2808 wr->wr_circ_size = wch->wc_circ_size;
2809 wr->wr_generation = wch->wc_generation;
2810
2811 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2812
2813 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2814 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2815 " len=%"PRId64" used=%zu\n",
2816 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2817 wch->wc_circ_size, used));
2818
2819 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2820
2821 error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
2822 if (error) {
2823 wapbl_replay_stop(wr);
2824 wapbl_replay_free(wr);
2825 return error;
2826 }
2827
2828 *wrp = wr;
2829 return 0;
2830
2831 errout:
2832 wapbl_free(scratch, MAXBSIZE);
2833 return error;
2834 }
2835
2836 void
2837 wapbl_replay_stop(struct wapbl_replay *wr)
2838 {
2839
2840 if (!wapbl_replay_isopen(wr))
2841 return;
2842
2843 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2844
2845 wapbl_free(wr->wr_scratch, MAXBSIZE);
2846 wr->wr_scratch = NULL;
2847
2848 wr->wr_logvp = NULL;
2849
2850 wapbl_blkhash_clear(wr);
2851 wapbl_blkhash_free(wr);
2852 }
2853
2854 void
2855 wapbl_replay_free(struct wapbl_replay *wr)
2856 {
2857
2858 KDASSERT(!wapbl_replay_isopen(wr));
2859
2860 if (wr->wr_inodes)
2861 wapbl_free(wr->wr_inodes,
2862 wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
2863 wapbl_free(wr, sizeof(*wr));
2864 }
2865
2866 #ifdef _KERNEL
2867 int
2868 wapbl_replay_isopen1(struct wapbl_replay *wr)
2869 {
2870
2871 return wapbl_replay_isopen(wr);
2872 }
2873 #endif
2874
2875 /*
 * Calculate the disk address for the i'th block in the wc_blocklist,
 * offset by j blocks of size blen.
 *
 * wc_daddr is always a kernel disk address in DEV_BSIZE units that
 * was written to the journal.
 *
 * The kernel needs that address plus the offset in DEV_BSIZE units.
 *
 * Userland needs that address plus the offset in blen units.
2886 */
2887 static daddr_t
2888 wapbl_block_daddr(struct wapbl_wc_blocklist *wc, int i, int j, int blen)
2889 {
2890 daddr_t pbn;
2891
2892 #ifdef _KERNEL
2893 pbn = wc->wc_blocks[i].wc_daddr + btodb(j * blen);
2894 #else
2895 pbn = dbtob(wc->wc_blocks[i].wc_daddr) / blen + j;
2896 #endif
2897
2898 return pbn;
2899 }
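
/*
 * Worked example (editorial, hypothetical values, DEV_BSIZE = 512):
 * for wc_daddr = 1000, blen = 2048 and j = 1, the kernel computes
 * 1000 + btodb(2048) = 1004 in DEV_BSIZE units, while userland
 * computes dbtob(1000) / 2048 + 1 = 251 in blen units -- the same
 * physical location expressed in each consumer's native block size.
 */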
2900
2901 static void
2902 wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
2903 {
2904 struct wapbl_wc_blocklist *wc =
2905 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2906 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2907 int i, j, n;
2908
2909 for (i = 0; i < wc->wc_blkcount; i++) {
2910 /*
2911 * Enter each physical block into the hashtable independently.
2912 */
2913 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2914 for (j = 0; j < n; j++) {
2915 wapbl_blkhash_ins(wr, wapbl_block_daddr(wc, i, j, fsblklen),
2916 *offp);
2917 wapbl_circ_advance(wr, fsblklen, offp);
2918 }
2919 }
2920 }
2921
2922 static void
2923 wapbl_replay_process_revocations(struct wapbl_replay *wr)
2924 {
2925 struct wapbl_wc_blocklist *wc =
2926 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2927 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2928 int i, j, n;
2929
2930 for (i = 0; i < wc->wc_blkcount; i++) {
2931 /*
2932 * Remove any blocks found from the hashtable.
2933 */
2934 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2935 for (j = 0; j < n; j++)
2936 wapbl_blkhash_rem(wr, wapbl_block_daddr(wc, i, j, fsblklen));
2937 }
2938 }
2939
2940 static void
2941 wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
2942 {
2943 struct wapbl_wc_inodelist *wc =
2944 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2945 void *new_inodes;
2946 const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
2947
2948 KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
2949
2950 /*
	 * Keep track of where we found this so the location won't be
	 * overwritten.
2953 */
2954 if (wc->wc_clear) {
2955 wr->wr_inodestail = oldoff;
2956 wr->wr_inodescnt = 0;
2957 if (wr->wr_inodes != NULL) {
2958 wapbl_free(wr->wr_inodes, oldsize);
2959 wr->wr_inodes = NULL;
2960 }
2961 }
2962 wr->wr_inodeshead = newoff;
2963 if (wc->wc_inocnt == 0)
2964 return;
2965
2966 new_inodes = wapbl_alloc((wr->wr_inodescnt + wc->wc_inocnt) *
2967 sizeof(wr->wr_inodes[0]));
2968 if (wr->wr_inodes != NULL) {
2969 memcpy(new_inodes, wr->wr_inodes, oldsize);
2970 wapbl_free(wr->wr_inodes, oldsize);
2971 }
2972 wr->wr_inodes = new_inodes;
2973 memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
2974 wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
2975 wr->wr_inodescnt += wc->wc_inocnt;
2976 }
2977
2978 static int
2979 wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
2980 {
2981 off_t off;
2982 int error;
2983
2984 int logblklen = 1 << wr->wr_log_dev_bshift;
2985
2986 wapbl_blkhash_clear(wr);
2987
2988 off = tail;
2989 while (off != head) {
2990 struct wapbl_wc_null *wcn;
2991 off_t saveoff = off;
2992 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2993 if (error)
2994 goto errout;
2995 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2996 switch (wcn->wc_type) {
2997 case WAPBL_WC_BLOCKS:
2998 wapbl_replay_process_blocks(wr, &off);
2999 break;
3000
3001 case WAPBL_WC_REVOCATIONS:
3002 wapbl_replay_process_revocations(wr);
3003 break;
3004
3005 case WAPBL_WC_INODES:
3006 wapbl_replay_process_inodes(wr, saveoff, off);
3007 break;
3008
3009 default:
3010 printf("Unrecognized wapbl type: 0x%08x\n",
3011 wcn->wc_type);
3012 error = EFTYPE;
3013 goto errout;
3014 }
3015 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3016 if (off != saveoff) {
3017 printf("wapbl_replay: corrupted records\n");
3018 error = EFTYPE;
3019 goto errout;
3020 }
3021 }
3022 return 0;
3023
3024 errout:
3025 wapbl_blkhash_clear(wr);
3026 return error;
3027 }
3028
3029 #if 0
3030 int
3031 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
3032 {
3033 off_t off;
3034 int mismatchcnt = 0;
3035 int logblklen = 1 << wr->wr_log_dev_bshift;
3036 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3037 void *scratch1 = wapbl_alloc(MAXBSIZE);
3038 void *scratch2 = wapbl_alloc(MAXBSIZE);
3039 int error = 0;
3040
3041 KDASSERT(wapbl_replay_isopen(wr));
3042
3043 off = wch->wc_tail;
3044 while (off != wch->wc_head) {
3045 struct wapbl_wc_null *wcn;
3046 #ifdef DEBUG
3047 off_t saveoff = off;
3048 #endif
3049 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3050 if (error)
3051 goto out;
3052 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3053 switch (wcn->wc_type) {
3054 case WAPBL_WC_BLOCKS:
3055 {
3056 struct wapbl_wc_blocklist *wc =
3057 (struct wapbl_wc_blocklist *)wr->wr_scratch;
3058 int i;
3059 for (i = 0; i < wc->wc_blkcount; i++) {
3060 int foundcnt = 0;
3061 int dirtycnt = 0;
3062 int j, n;
3063 /*
3064 * Check each physical block into the
3065 * hashtable independently
3066 */
3067 n = wc->wc_blocks[i].wc_dlen >>
3068 wch->wc_fs_dev_bshift;
3069 for (j = 0; j < n; j++) {
3070 struct wapbl_blk *wb =
3071 wapbl_blkhash_get(wr,
3072 wapbl_block_daddr(wc, i, j, fsblklen));
3073 if (wb && (wb->wb_off == off)) {
3074 foundcnt++;
3075 error =
3076 wapbl_circ_read(wr,
3077 scratch1, fsblklen,
3078 &off);
3079 if (error)
3080 goto out;
3081 error =
3082 wapbl_read(scratch2,
3083 fsblklen, fsdevvp,
3084 wb->wb_blk);
3085 if (error)
3086 goto out;
3087 if (memcmp(scratch1,
3088 scratch2,
3089 fsblklen)) {
3090 printf(
3091 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
3092 wb->wb_blk, (intmax_t)off);
3093 dirtycnt++;
3094 mismatchcnt++;
3095 }
3096 } else {
3097 wapbl_circ_advance(wr,
3098 fsblklen, &off);
3099 }
3100 }
3101 #if 0
3102 /*
3103 * If all of the blocks in an entry
3104 * are clean, then remove all of its
3105 * blocks from the hashtable since they
3106 * never will need replay.
3107 */
3108 if ((foundcnt != 0) &&
3109 (dirtycnt == 0)) {
3110 off = saveoff;
3111 wapbl_circ_advance(wr,
3112 logblklen, &off);
3113 for (j = 0; j < n; j++) {
3114 struct wapbl_blk *wb =
3115 wapbl_blkhash_get(wr,
3116 wapbl_block_daddr(wc, i, j, fsblklen));
3117 if (wb &&
3118 (wb->wb_off == off)) {
3119 wapbl_blkhash_rem(wr, wb->wb_blk);
3120 }
3121 wapbl_circ_advance(wr,
3122 fsblklen, &off);
3123 }
3124 }
3125 #endif
3126 }
3127 }
3128 break;
3129 case WAPBL_WC_REVOCATIONS:
3130 case WAPBL_WC_INODES:
3131 break;
3132 default:
3133 KASSERT(0);
3134 }
3135 #ifdef DEBUG
3136 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3137 KASSERT(off == saveoff);
3138 #endif
3139 }
3140 out:
3141 wapbl_free(scratch1, MAXBSIZE);
3142 wapbl_free(scratch2, MAXBSIZE);
3143 if (!error && mismatchcnt)
3144 error = EFTYPE;
3145 return error;
3146 }
3147 #endif
3148
3149 int
3150 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
3151 {
3152 struct wapbl_blk *wb;
3153 size_t i;
3154 off_t off;
3155 void *scratch;
3156 int error = 0;
3157 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3158
3159 KDASSERT(wapbl_replay_isopen(wr));
3160
3161 scratch = wapbl_alloc(MAXBSIZE);
3162
3163 for (i = 0; i <= wr->wr_blkhashmask; ++i) {
3164 LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
3165 off = wb->wb_off;
3166 error = wapbl_circ_read(wr, scratch, fsblklen, &off);
3167 if (error)
3168 break;
3169 error = wapbl_write(scratch, fsblklen, fsdevvp,
3170 wb->wb_blk);
3171 if (error)
3172 break;
3173 }
3174 }
3175
3176 wapbl_free(scratch, MAXBSIZE);
3177 return error;
3178 }
3179
3180 int
3181 wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
3182 {
3183 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3184
3185 KDASSERT(wapbl_replay_isopen(wr));
3186 KASSERT((len % fsblklen) == 0);
3187
	while (len != 0) {
		struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
		if (wb)
			return 1;
		len -= fsblklen;
		blk++;
	}
3194 return 0;
3195 }
3196
3197 int
3198 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
3199 {
3200 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3201
3202 KDASSERT(wapbl_replay_isopen(wr));
3203
3204 KASSERT((len % fsblklen) == 0);
3205
3206 while (len != 0) {
3207 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3208 if (wb) {
3209 off_t off = wb->wb_off;
3210 int error;
3211 error = wapbl_circ_read(wr, data, fsblklen, &off);
3212 if (error)
3213 return error;
3214 }
3215 data = (uint8_t *)data + fsblklen;
3216 len -= fsblklen;
3217 blk++;
3218 }
3219 return 0;
3220 }
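
/*
 * Note (editorial): this is an overlay read -- ranges with no
 * journalled copy are left untouched in the caller's buffer, so a
 * caller presumably reads the on-disk contents first and then lets
 * this routine patch in the newer copies from the journal.
 */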
3221
3222 #ifdef _KERNEL
3223
3224 MODULE(MODULE_CLASS_VFS, wapbl, NULL);
3225
3226 static int
3227 wapbl_modcmd(modcmd_t cmd, void *arg)
3228 {
3229
3230 switch (cmd) {
3231 case MODULE_CMD_INIT:
3232 wapbl_init();
3233 return 0;
3234 case MODULE_CMD_FINI:
3235 return wapbl_fini();
3236 default:
3237 return ENOTTY;
3238 }
3239 }
3240 #endif /* _KERNEL */
3241