 1 /*	$NetBSD: vfs_wapbl.c,v 1.71 2016/05/07 20:16:38 riastradh Exp $	*/
2
3 /*-
4 * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
 33  * This implements file system independent write-ahead logging (WAPBL).
34 */
35
36 #define WAPBL_INTERNAL
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.71 2016/05/07 20:16:38 riastradh Exp $");
40
41 #include <sys/param.h>
42 #include <sys/bitops.h>
43 #include <sys/time.h>
44 #include <sys/wapbl.h>
45 #include <sys/wapbl_replay.h>
46
47 #ifdef _KERNEL
48
49 #include <sys/atomic.h>
50 #include <sys/conf.h>
51 #include <sys/file.h>
52 #include <sys/kauth.h>
53 #include <sys/kernel.h>
54 #include <sys/module.h>
55 #include <sys/mount.h>
56 #include <sys/mutex.h>
57 #include <sys/namei.h>
58 #include <sys/proc.h>
59 #include <sys/resourcevar.h>
60 #include <sys/sysctl.h>
61 #include <sys/uio.h>
62 #include <sys/vnode.h>
63
64 #include <miscfs/specfs/specdev.h>
65
66 #define wapbl_alloc(s) kmem_alloc((s), KM_SLEEP)
67 #define wapbl_free(a, s) kmem_free((a), (s))
68 #define wapbl_calloc(n, s) kmem_zalloc((n)*(s), KM_SLEEP)
69
70 static struct sysctllog *wapbl_sysctl;
71 static int wapbl_flush_disk_cache = 1;
72 static int wapbl_verbose_commit = 0;
73
74 static inline size_t wapbl_space_free(size_t, off_t, off_t);
75
76 #else /* !_KERNEL */
77
78 #include <assert.h>
79 #include <errno.h>
80 #include <stdbool.h>
81 #include <stdio.h>
82 #include <stdlib.h>
83 #include <string.h>
84
85 #define KDASSERT(x) assert(x)
86 #define KASSERT(x) assert(x)
87 #define wapbl_alloc(s) malloc(s)
88 #define wapbl_free(a, s) free(a)
89 #define wapbl_calloc(n, s) calloc((n), (s))
90
91 #endif /* !_KERNEL */
92
93 /*
94 * INTERNAL DATA STRUCTURES
95 */
96
97 /*
98 * This structure holds per-mount log information.
99 *
100 * Legend: a = atomic access only
101 * r = read-only after init
102 * l = rwlock held
103 * m = mutex held
104 * lm = rwlock held writing or mutex held
105 * u = unlocked access ok
106 * b = bufcache_lock held
107 */
108 LIST_HEAD(wapbl_ino_head, wapbl_ino);
109 struct wapbl {
110 struct vnode *wl_logvp; /* r: log here */
111 struct vnode *wl_devvp; /* r: log on this device */
112 struct mount *wl_mount; /* r: mountpoint wl is associated with */
113 daddr_t wl_logpbn; /* r: Physical block number of start of log */
114 int wl_log_dev_bshift; /* r: logarithm of device block size of log
115 device */
116 int wl_fs_dev_bshift; /* r: logarithm of device block size of
117 filesystem device */
118
119 unsigned wl_lock_count; /* m: Count of transactions in progress */
120
121 size_t wl_circ_size; /* r: Number of bytes in buffer of log */
122 size_t wl_circ_off; /* r: Number of bytes reserved at start */
123
124 size_t wl_bufcount_max; /* r: Number of buffers reserved for log */
125 size_t wl_bufbytes_max; /* r: Number of buf bytes reserved for log */
126
127 off_t wl_head; /* l: Byte offset of log head */
128 off_t wl_tail; /* l: Byte offset of log tail */
129 /*
130 * WAPBL log layout, stored on wl_devvp at wl_logpbn:
131 *
132 * ___________________ wl_circ_size __________________
133 * / \
134 * +---------+---------+-------+--------------+--------+
135 * [ commit0 | commit1 | CCWCW | EEEEEEEEEEEE | CCCWCW ]
136 * +---------+---------+-------+--------------+--------+
137 * wl_circ_off --^ ^-- wl_head ^-- wl_tail
138 *
139 * commit0 and commit1 are commit headers. A commit header has
140 * a generation number, indicating which of the two headers is
141 * more recent, and an assignment of head and tail pointers.
142 * The rest is a circular queue of log records, starting at
143 * the byte offset wl_circ_off.
144 *
145 * E marks empty space for records.
146 * W marks records for block writes issued but waiting.
147 * C marks completed records.
148 *
149 * wapbl_flush writes new records to empty `E' spaces after
150 * wl_head from the current transaction in memory.
151 *
152 * wapbl_truncate advances wl_tail past any completed `C'
153 * records, freeing them up for use.
154 *
155 * head == tail == 0 means log is empty.
156 * head == tail != 0 means log is full.
157 *
158 * See assertions in wapbl_advance() for other boundary
159 * conditions.
160 *
161 * Only wapbl_flush moves the head, except when wapbl_truncate
162 * sets it to 0 to indicate that the log is empty.
163 *
 164 	 * Only wapbl_truncate moves the tail, except when wapbl_flush
 165 	 * sets it to wl_circ_off to indicate that the log is no longer empty.
166 */
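	/*
	 * Concrete illustration (hypothetical values, not required by the
	 * code): with a 512-byte log device block (wl_log_dev_bshift == 9),
	 * wl_circ_off is 2 << 9 == 1024, so bytes 0-511 hold commit0,
	 * bytes 512-1023 hold commit1, and the circular record area
	 * occupies byte offsets [wl_circ_off, wl_circ_off + wl_circ_size).
	 */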
167
168 struct wapbl_wc_header *wl_wc_header; /* l */
 169 	void *wl_wc_scratch;	/* l:	   scratch space (XXX: why is this needed?) */
170
171 kmutex_t wl_mtx; /* u: short-term lock */
172 krwlock_t wl_rwlock; /* u: File system transaction lock */
173
174 /*
175 * Must be held while accessing
176 * wl_count or wl_bufs or head or tail
177 */
178
179 /*
180 * Callback called from within the flush routine to flush any extra
181 * bits. Note that flush may be skipped without calling this if
182 * there are no outstanding buffers in the transaction.
183 */
 184 #ifdef _KERNEL
185 wapbl_flush_fn_t wl_flush; /* r */
186 wapbl_flush_fn_t wl_flush_abort;/* r */
187 #endif
188
189 size_t wl_bufbytes; /* m: Byte count of pages in wl_bufs */
190 size_t wl_bufcount; /* m: Count of buffers in wl_bufs */
191 size_t wl_bcount; /* m: Total bcount of wl_bufs */
192
193 LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */
194
195 kcondvar_t wl_reclaimable_cv; /* m (obviously) */
196 size_t wl_reclaimable_bytes; /* m: Amount of space available for
197 reclamation by truncate */
198 int wl_error_count; /* m: # of wl_entries with errors */
199 size_t wl_reserved_bytes; /* never truncate log smaller than this */
200
201 #ifdef WAPBL_DEBUG_BUFBYTES
202 size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
203 #endif
204
205 daddr_t *wl_deallocblks;/* lm: address of block */
206 int *wl_dealloclens; /* lm: size of block */
207 int wl_dealloccnt; /* lm: total count */
208 int wl_dealloclim; /* l: max count */
209
210 /* hashtable of inode numbers for allocated but unlinked inodes */
211 /* synch ??? */
212 struct wapbl_ino_head *wl_inohash;
213 u_long wl_inohashmask;
214 int wl_inohashcnt;
215
216 SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
217 accounting */
218
219 u_char *wl_buffer; /* l: buffer for wapbl_buffered_write() */
220 daddr_t wl_buffer_dblk; /* l: buffer disk block address */
221 size_t wl_buffer_used; /* l: buffer current use */
222 };
223
224 #ifdef WAPBL_DEBUG_PRINT
225 int wapbl_debug_print = WAPBL_DEBUG_PRINT;
226 #endif
227
228 /****************************************************************/
229 #ifdef _KERNEL
230
231 #ifdef WAPBL_DEBUG
232 struct wapbl *wapbl_debug_wl;
233 #endif
234
235 static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
236 static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
237 static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
238 static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
239 #endif /* _KERNEL */
240
241 static int wapbl_replay_process(struct wapbl_replay *wr, off_t, off_t);
242
243 static inline size_t wapbl_space_used(size_t avail, off_t head,
244 off_t tail);
245
246 #ifdef _KERNEL
247
248 static struct pool wapbl_entry_pool;
249
250 #define WAPBL_INODETRK_SIZE 83
251 static int wapbl_ino_pool_refcount;
252 static struct pool wapbl_ino_pool;
253 struct wapbl_ino {
254 LIST_ENTRY(wapbl_ino) wi_hash;
255 ino_t wi_ino;
256 mode_t wi_mode;
257 };
258
259 static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
260 static void wapbl_inodetrk_free(struct wapbl *wl);
261 static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);
262
263 static size_t wapbl_transaction_len(struct wapbl *wl);
264 static inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);
265
266 #if 0
267 int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
268 #endif
269
270 static int wapbl_replay_isopen1(struct wapbl_replay *);
271
272 /*
273 * This is useful for debugging. If set, the log will
274 * only be truncated when necessary.
275 */
276 int wapbl_lazy_truncate = 0;
277
278 struct wapbl_ops wapbl_ops = {
279 .wo_wapbl_discard = wapbl_discard,
280 .wo_wapbl_replay_isopen = wapbl_replay_isopen1,
281 .wo_wapbl_replay_can_read = wapbl_replay_can_read,
282 .wo_wapbl_replay_read = wapbl_replay_read,
283 .wo_wapbl_add_buf = wapbl_add_buf,
284 .wo_wapbl_remove_buf = wapbl_remove_buf,
285 .wo_wapbl_resize_buf = wapbl_resize_buf,
286 .wo_wapbl_begin = wapbl_begin,
287 .wo_wapbl_end = wapbl_end,
288 .wo_wapbl_junlock_assert= wapbl_junlock_assert,
289
290 /* XXX: the following is only used to say "this is a wapbl buf" */
291 .wo_wapbl_biodone = wapbl_biodone,
292 };
293
294 static int
295 wapbl_sysctl_init(void)
296 {
297 int rv;
298 const struct sysctlnode *rnode, *cnode;
299
300 wapbl_sysctl = NULL;
301
302 rv = sysctl_createv(&wapbl_sysctl, 0, NULL, &rnode,
303 CTLFLAG_PERMANENT,
304 CTLTYPE_NODE, "wapbl",
305 SYSCTL_DESCR("WAPBL journaling options"),
306 NULL, 0, NULL, 0,
307 CTL_VFS, CTL_CREATE, CTL_EOL);
308 if (rv)
309 return rv;
310
311 rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
312 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
313 CTLTYPE_INT, "flush_disk_cache",
314 SYSCTL_DESCR("flush disk cache"),
315 NULL, 0, &wapbl_flush_disk_cache, 0,
316 CTL_CREATE, CTL_EOL);
317 if (rv)
318 return rv;
319
320 rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
321 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
322 CTLTYPE_INT, "verbose_commit",
323 SYSCTL_DESCR("show time and size of wapbl log commits"),
324 NULL, 0, &wapbl_verbose_commit, 0,
325 CTL_CREATE, CTL_EOL);
326 return rv;
327 }
328
329 static void
330 wapbl_init(void)
331 {
332
333 pool_init(&wapbl_entry_pool, sizeof(struct wapbl_entry), 0, 0, 0,
334 "wapblentrypl", &pool_allocator_kmem, IPL_VM);
335
336 wapbl_sysctl_init();
337 }
338
339 static int
340 wapbl_fini(bool interface)
341 {
342
343 if (wapbl_sysctl != NULL)
344 sysctl_teardown(&wapbl_sysctl);
345
346 pool_destroy(&wapbl_entry_pool);
347
348 return 0;
349 }
350
351 static int
352 wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
353 {
354 int error, i;
355
356 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
357 ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));
358
359 /*
 360 	 * It's only valid to reuse the replay log if it's
 361 	 * the same as the new log we just opened.
362 */
363 KDASSERT(!wapbl_replay_isopen(wr));
364 KASSERT(wl->wl_devvp->v_type == VBLK);
365 KASSERT(wr->wr_devvp->v_type == VBLK);
366 KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
367 KASSERT(wl->wl_logpbn == wr->wr_logpbn);
368 KASSERT(wl->wl_circ_size == wr->wr_circ_size);
369 KASSERT(wl->wl_circ_off == wr->wr_circ_off);
370 KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
371 KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);
372
373 wl->wl_wc_header->wc_generation = wr->wr_generation + 1;
374
375 for (i = 0; i < wr->wr_inodescnt; i++)
376 wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
377 wr->wr_inodes[i].wr_imode);
378
379 /* Make sure new transaction won't overwrite old inodes list */
380 KDASSERT(wapbl_transaction_len(wl) <=
381 wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
382 wr->wr_inodestail));
383
384 wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
385 wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
386 wapbl_transaction_len(wl);
387
388 error = wapbl_write_inodes(wl, &wl->wl_head);
389 if (error)
390 return error;
391
392 KASSERT(wl->wl_head != wl->wl_tail);
393 KASSERT(wl->wl_head != 0);
394
395 return 0;
396 }
397
398 int
399 wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
400 daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
401 wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
402 {
403 struct wapbl *wl;
404 struct vnode *devvp;
405 daddr_t logpbn;
406 int error;
407 int log_dev_bshift = ilog2(blksize);
408 int fs_dev_bshift = log_dev_bshift;
409 int run;
410
411 WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
412 " count=%zu blksize=%zu\n", vp, off, count, blksize));
413
414 if (log_dev_bshift > fs_dev_bshift) {
415 WAPBL_PRINTF(WAPBL_PRINT_OPEN,
416 ("wapbl: log device's block size cannot be larger "
417 "than filesystem's\n"));
418 /*
419 * Not currently implemented, although it could be if
420 * needed someday.
421 */
422 return ENOSYS;
423 }
424
425 if (off < 0)
426 return EINVAL;
427
428 if (blksize < DEV_BSIZE)
429 return EINVAL;
430 if (blksize % DEV_BSIZE)
431 return EINVAL;
432
433 /* XXXTODO: verify that the full load is writable */
434
435 /*
436 * XXX check for minimum log size
437 * minimum is governed by minimum amount of space
438 * to complete a transaction. (probably truncate)
439 */
440 /* XXX for now pick something minimal */
441 if ((count * blksize) < MAXPHYS) {
442 return ENOSPC;
443 }
444
445 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
446 return error;
447 }
448
449 wl = wapbl_calloc(1, sizeof(*wl));
450 rw_init(&wl->wl_rwlock);
451 mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
452 cv_init(&wl->wl_reclaimable_cv, "wapblrec");
453 LIST_INIT(&wl->wl_bufs);
454 SIMPLEQ_INIT(&wl->wl_entries);
455
456 wl->wl_logvp = vp;
457 wl->wl_devvp = devvp;
458 wl->wl_mount = mp;
459 wl->wl_logpbn = logpbn;
460 wl->wl_log_dev_bshift = log_dev_bshift;
461 wl->wl_fs_dev_bshift = fs_dev_bshift;
462
463 wl->wl_flush = flushfn;
464 wl->wl_flush_abort = flushabortfn;
465
466 /* Reserve two log device blocks for the commit headers */
467 wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
468 wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
 469 	/* truncate the log usage to a multiple of the log device block size */
470 wl->wl_circ_size >>= wl->wl_log_dev_bshift;
471 wl->wl_circ_size <<= wl->wl_log_dev_bshift;
472
473 /*
 474 	 * wl_bufbytes_max limits the size of the in-memory transaction space.
 475 	 *  - Since buffers are allocated and accounted for in units of
 476 	 *    PAGE_SIZE it is required to be a multiple of PAGE_SIZE
 477 	 *    (i.e. 1<<PAGE_SHIFT).
 478 	 *  - Since the log device has to be written in units of
 479 	 *    1<<wl_log_dev_bshift it is required to be a multiple of
 480 	 *    1<<wl_log_dev_bshift.
 481 	 *  - Since the filesystem will provide data in units of 1<<wl_fs_dev_bshift,
 482 	 *    it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
 483 	 * Therefore it must be a multiple of the least common multiple of those
484 * three quantities. Fortunately, all of those quantities are
485 * guaranteed to be a power of two, and the least common multiple of
486 * a set of numbers which are all powers of two is simply the maximum
487 * of those numbers. Finally, the maximum logarithm of a power of two
488 * is the same as the log of the maximum power of two. So we can do
489 * the following operations to size wl_bufbytes_max:
490 */
491
492 /* XXX fix actual number of pages reserved per filesystem. */
493 wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);
494
 495 	/* Round wl_bufbytes_max down to satisfy the largest power-of-two constraint */
496 wl->wl_bufbytes_max >>= PAGE_SHIFT;
497 wl->wl_bufbytes_max <<= PAGE_SHIFT;
498 wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
499 wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
500 wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
501 wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
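	/*
	 * For illustration (assumed example values): with PAGE_SHIFT == 12
	 * and both bshifts == 9, the shifts above round wl_bufbytes_max
	 * down to a multiple of 4096; e.g. a starting value of 1048999
	 * becomes (1048999 >> 12) << 12 == 1048576, and the later 9-bit
	 * shifts leave it unchanged since it is already a multiple of 512.
	 */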
502
503 /* XXX maybe use filesystem fragment size instead of 1024 */
504 /* XXX fix actual number of buffers reserved per filesystem. */
505 wl->wl_bufcount_max = (nbuf / 2) * 1024;
506
507 /* XXX tie this into resource estimation */
508 wl->wl_dealloclim = wl->wl_bufbytes_max / mp->mnt_stat.f_bsize / 2;
509
510 wl->wl_deallocblks = wapbl_alloc(sizeof(*wl->wl_deallocblks) *
511 wl->wl_dealloclim);
512 wl->wl_dealloclens = wapbl_alloc(sizeof(*wl->wl_dealloclens) *
513 wl->wl_dealloclim);
514
515 wl->wl_buffer = wapbl_alloc(MAXPHYS);
516 wl->wl_buffer_used = 0;
517
518 wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);
519
520 /* Initialize the commit header */
521 {
522 struct wapbl_wc_header *wc;
523 size_t len = 1 << wl->wl_log_dev_bshift;
524 wc = wapbl_calloc(1, len);
525 wc->wc_type = WAPBL_WC_HEADER;
526 wc->wc_len = len;
527 wc->wc_circ_off = wl->wl_circ_off;
528 wc->wc_circ_size = wl->wl_circ_size;
529 /* XXX wc->wc_fsid */
530 wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
531 wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
532 wl->wl_wc_header = wc;
533 wl->wl_wc_scratch = wapbl_alloc(len);
534 }
535
536 /*
537 * if there was an existing set of unlinked but
538 * allocated inodes, preserve it in the new
539 * log.
540 */
541 if (wr && wr->wr_inodescnt) {
542 error = wapbl_start_flush_inodes(wl, wr);
543 if (error)
544 goto errout;
545 }
546
547 error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
548 if (error) {
549 goto errout;
550 }
551
552 *wlp = wl;
553 #if defined(WAPBL_DEBUG)
554 wapbl_debug_wl = wl;
555 #endif
556
557 return 0;
558 errout:
559 wapbl_discard(wl);
560 wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
561 wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
562 wapbl_free(wl->wl_deallocblks,
563 sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim);
564 wapbl_free(wl->wl_dealloclens,
565 sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim);
566 wapbl_free(wl->wl_buffer, MAXPHYS);
567 wapbl_inodetrk_free(wl);
568 wapbl_free(wl, sizeof(*wl));
569
570 return error;
571 }
572
573 /*
574 * Like wapbl_flush, only discards the transaction
575 * completely
576 */
577
578 void
579 wapbl_discard(struct wapbl *wl)
580 {
581 struct wapbl_entry *we;
582 struct buf *bp;
583 int i;
584
585 /*
586 * XXX we may consider using upgrade here
587 * if we want to call flush from inside a transaction
588 */
589 rw_enter(&wl->wl_rwlock, RW_WRITER);
590 wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
591 wl->wl_dealloccnt);
592
593 #ifdef WAPBL_DEBUG_PRINT
594 {
595 pid_t pid = -1;
596 lwpid_t lid = -1;
597 if (curproc)
598 pid = curproc->p_pid;
599 if (curlwp)
600 lid = curlwp->l_lid;
601 #ifdef WAPBL_DEBUG_BUFBYTES
602 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
603 ("wapbl_discard: thread %d.%d discarding "
604 "transaction\n"
605 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
606 "deallocs=%d inodes=%d\n"
607 "\terrcnt = %u, reclaimable=%zu reserved=%zu "
608 "unsynced=%zu\n",
609 pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
610 wl->wl_bcount, wl->wl_dealloccnt,
611 wl->wl_inohashcnt, wl->wl_error_count,
612 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
613 wl->wl_unsynced_bufbytes));
614 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
615 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
616 ("\tentry: bufcount = %zu, reclaimable = %zu, "
617 "error = %d, unsynced = %zu\n",
618 we->we_bufcount, we->we_reclaimable_bytes,
619 we->we_error, we->we_unsynced_bufbytes));
620 }
621 #else /* !WAPBL_DEBUG_BUFBYTES */
622 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
623 ("wapbl_discard: thread %d.%d discarding transaction\n"
624 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
625 "deallocs=%d inodes=%d\n"
626 "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
627 pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
628 wl->wl_bcount, wl->wl_dealloccnt,
629 wl->wl_inohashcnt, wl->wl_error_count,
630 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
631 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
632 WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
633 ("\tentry: bufcount = %zu, reclaimable = %zu, "
634 "error = %d\n",
635 we->we_bufcount, we->we_reclaimable_bytes,
636 we->we_error));
637 }
638 #endif /* !WAPBL_DEBUG_BUFBYTES */
639 }
640 #endif /* WAPBL_DEBUG_PRINT */
641
642 for (i = 0; i <= wl->wl_inohashmask; i++) {
643 struct wapbl_ino_head *wih;
644 struct wapbl_ino *wi;
645
646 wih = &wl->wl_inohash[i];
647 while ((wi = LIST_FIRST(wih)) != NULL) {
648 LIST_REMOVE(wi, wi_hash);
649 pool_put(&wapbl_ino_pool, wi);
650 KASSERT(wl->wl_inohashcnt > 0);
651 wl->wl_inohashcnt--;
652 }
653 }
654
655 /*
656 * clean buffer list
657 */
658 mutex_enter(&bufcache_lock);
659 mutex_enter(&wl->wl_mtx);
660 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
661 if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
662 /*
663 * The buffer will be unlocked and
664 * removed from the transaction in brelse
665 */
666 mutex_exit(&wl->wl_mtx);
667 brelsel(bp, 0);
668 mutex_enter(&wl->wl_mtx);
669 }
670 }
671 mutex_exit(&wl->wl_mtx);
672 mutex_exit(&bufcache_lock);
673
674 /*
 675 	 * Remove references to this wl from wl_entries and free any entries
 676 	 * which no longer have buffers; the others will be freed in
 677 	 * wapbl_biodone() when they no longer have any buffers.
678 */
679 while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
680 SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
681 /* XXX should we be accumulating wl_error_count
682 * and increasing reclaimable bytes ? */
683 we->we_wapbl = NULL;
684 if (we->we_bufcount == 0) {
685 #ifdef WAPBL_DEBUG_BUFBYTES
686 KASSERT(we->we_unsynced_bufbytes == 0);
687 #endif
688 pool_put(&wapbl_entry_pool, we);
689 }
690 }
691
692 /* Discard list of deallocs */
693 wl->wl_dealloccnt = 0;
694 /* XXX should we clear wl_reserved_bytes? */
695
696 KASSERT(wl->wl_bufbytes == 0);
697 KASSERT(wl->wl_bcount == 0);
698 KASSERT(wl->wl_bufcount == 0);
699 KASSERT(LIST_EMPTY(&wl->wl_bufs));
700 KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
701 KASSERT(wl->wl_inohashcnt == 0);
702
703 rw_exit(&wl->wl_rwlock);
704 }
705
706 int
707 wapbl_stop(struct wapbl *wl, int force)
708 {
709 int error;
710
711 WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
712 error = wapbl_flush(wl, 1);
713 if (error) {
714 if (force)
715 wapbl_discard(wl);
716 else
717 return error;
718 }
719
720 /* Unlinked inodes persist after a flush */
721 if (wl->wl_inohashcnt) {
722 if (force) {
723 wapbl_discard(wl);
724 } else {
725 return EBUSY;
726 }
727 }
728
729 KASSERT(wl->wl_bufbytes == 0);
730 KASSERT(wl->wl_bcount == 0);
731 KASSERT(wl->wl_bufcount == 0);
732 KASSERT(LIST_EMPTY(&wl->wl_bufs));
733 KASSERT(wl->wl_dealloccnt == 0);
734 KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
735 KASSERT(wl->wl_inohashcnt == 0);
736
737 wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
738 wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
739 wapbl_free(wl->wl_deallocblks,
740 sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim);
741 wapbl_free(wl->wl_dealloclens,
742 sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim);
743 wapbl_free(wl->wl_buffer, MAXPHYS);
744 wapbl_inodetrk_free(wl);
745
746 cv_destroy(&wl->wl_reclaimable_cv);
747 mutex_destroy(&wl->wl_mtx);
748 rw_destroy(&wl->wl_rwlock);
749 wapbl_free(wl, sizeof(*wl));
750
751 return 0;
752 }
753
754 /****************************************************************/
755 /*
756 * Unbuffered disk I/O
757 */
758
759 static int
760 wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
761 {
762 struct pstats *pstats = curlwp->l_proc->p_stats;
763 struct buf *bp;
764 int error;
765
766 KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
767 KASSERT(devvp->v_type == VBLK);
768
769 if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
770 mutex_enter(devvp->v_interlock);
771 devvp->v_numoutput++;
772 mutex_exit(devvp->v_interlock);
773 pstats->p_ru.ru_oublock++;
774 } else {
775 pstats->p_ru.ru_inblock++;
776 }
777
778 bp = getiobuf(devvp, true);
779 bp->b_flags = flags;
780 bp->b_cflags = BC_BUSY; /* silly & dubious */
781 bp->b_dev = devvp->v_rdev;
782 bp->b_data = data;
783 bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
784 bp->b_blkno = pbn;
785 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
786
787 WAPBL_PRINTF(WAPBL_PRINT_IO,
788 ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%"PRIx64"\n",
789 BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
790 bp->b_blkno, bp->b_dev));
791
792 VOP_STRATEGY(devvp, bp);
793
794 error = biowait(bp);
795 putiobuf(bp);
796
797 if (error) {
798 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
799 ("wapbl_doio: %s %zu bytes at block %" PRId64
800 " on dev 0x%"PRIx64" failed with error %d\n",
801 (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
802 "write" : "read"),
803 len, pbn, devvp->v_rdev, error));
804 }
805
806 return error;
807 }
808
809 /*
810 * wapbl_write(data, len, devvp, pbn)
811 *
812 * Synchronously write len bytes from data to physical block pbn
813 * on devvp.
814 */
815 int
816 wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
817 {
818
819 return wapbl_doio(data, len, devvp, pbn, B_WRITE);
820 }
821
822 /*
823 * wapbl_read(data, len, devvp, pbn)
824 *
825 * Synchronously read len bytes into data from physical block pbn
826 * on devvp.
827 */
828 int
829 wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
830 {
831
832 return wapbl_doio(data, len, devvp, pbn, B_READ);
833 }
834
835 /****************************************************************/
836 /*
837 * Buffered disk writes -- try to coalesce writes and emit
838 * MAXPHYS-aligned blocks.
839 */
840
841 /*
842 * wapbl_buffered_flush(wl)
843 *
844 * Flush any buffered writes from wapbl_buffered_write.
845 */
846 static int
847 wapbl_buffered_flush(struct wapbl *wl)
848 {
849 int error;
850
851 if (wl->wl_buffer_used == 0)
852 return 0;
853
854 error = wapbl_doio(wl->wl_buffer, wl->wl_buffer_used,
855 wl->wl_devvp, wl->wl_buffer_dblk, B_WRITE);
856 wl->wl_buffer_used = 0;
857
858 return error;
859 }
860
861 /*
862 * wapbl_buffered_write(data, len, wl, pbn)
863 *
864 * Write len bytes from data to physical block pbn on
865 * wl->wl_devvp. The write may not complete until
866 * wapbl_buffered_flush.
867 */
868 static int
869 wapbl_buffered_write(void *data, size_t len, struct wapbl *wl, daddr_t pbn)
870 {
871 int error;
872 size_t resid;
873
874 /*
 875 	 * If not adjacent to the buffered data, flush first.  The disk block
 876 	 * address is always valid for a non-empty buffer.
877 */
878 if (wl->wl_buffer_used > 0 &&
879 pbn != wl->wl_buffer_dblk + btodb(wl->wl_buffer_used)) {
880 error = wapbl_buffered_flush(wl);
881 if (error)
882 return error;
883 }
884 /*
885 * If this write goes to an empty buffer we have to
886 * save the disk block address first.
887 */
888 if (wl->wl_buffer_used == 0)
889 wl->wl_buffer_dblk = pbn;
890 /*
891 * Remaining space so this buffer ends on a MAXPHYS boundary.
892 *
 893 	 * Cannot become less than or equal to zero, as the buffer would
 894 	 * have been flushed on the last call in that case.
895 */
896 resid = MAXPHYS - dbtob(wl->wl_buffer_dblk % btodb(MAXPHYS)) -
897 wl->wl_buffer_used;
898 KASSERT(resid > 0);
899 KASSERT(dbtob(btodb(resid)) == resid);
900 if (len >= resid) {
901 memcpy(wl->wl_buffer + wl->wl_buffer_used, data, resid);
902 wl->wl_buffer_used += resid;
903 error = wapbl_doio(wl->wl_buffer, wl->wl_buffer_used,
904 wl->wl_devvp, wl->wl_buffer_dblk, B_WRITE);
905 data = (uint8_t *)data + resid;
906 len -= resid;
907 wl->wl_buffer_dblk = pbn + btodb(resid);
908 wl->wl_buffer_used = 0;
909 if (error)
910 return error;
911 }
912 KASSERT(len < MAXPHYS);
913 if (len > 0) {
914 memcpy(wl->wl_buffer + wl->wl_buffer_used, data, len);
915 wl->wl_buffer_used += len;
916 }
917
918 return 0;
919 }
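/*
 * Worked example of the MAXPHYS-boundary arithmetic in
 * wapbl_buffered_write() (illustrative values only, assuming
 * MAXPHYS == 64 KiB and DEV_BSIZE == 512): with wl_buffer_dblk == 100 and
 * wl_buffer_used == 4096,
 *
 *	resid = 65536 - dbtob(100 % 128) - 4096
 *	      = 65536 - 51200 - 4096 = 10240
 *
 * so a write of 10240 bytes or more fills the buffer up to the next
 * 64 KiB-aligned disk address and triggers an immediate wapbl_doio().
 */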
920
921 /*
922 * wapbl_circ_write(wl, data, len, offp)
923 *
924 * Write len bytes from data to the circular queue of wl, starting
925 * at linear byte offset *offp, and returning the new linear byte
926 * offset in *offp.
927 *
928 * If the starting linear byte offset precedes wl->wl_circ_off,
929 * the write instead begins at wl->wl_circ_off. XXX WTF? This
930 * should be a KASSERT, not a conditional.
931 *
932 * The write is buffered in wl and must be flushed with
933 * wapbl_buffered_flush before it will be submitted to the disk.
934 */
935 static int
936 wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
937 {
938 size_t slen;
939 off_t off = *offp;
940 int error;
941 daddr_t pbn;
942
943 KDASSERT(((len >> wl->wl_log_dev_bshift) <<
944 wl->wl_log_dev_bshift) == len);
945
946 if (off < wl->wl_circ_off)
947 off = wl->wl_circ_off;
948 slen = wl->wl_circ_off + wl->wl_circ_size - off;
949 if (slen < len) {
950 pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
951 #ifdef _KERNEL
952 pbn = btodb(pbn << wl->wl_log_dev_bshift);
953 #endif
954 error = wapbl_buffered_write(data, slen, wl, pbn);
955 if (error)
956 return error;
957 data = (uint8_t *)data + slen;
958 len -= slen;
959 off = wl->wl_circ_off;
960 }
961 pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
962 #ifdef _KERNEL
963 pbn = btodb(pbn << wl->wl_log_dev_bshift);
964 #endif
965 error = wapbl_buffered_write(data, len, wl, pbn);
966 if (error)
967 return error;
968 off += len;
969 if (off >= wl->wl_circ_off + wl->wl_circ_size)
970 off = wl->wl_circ_off;
971 *offp = off;
972 return 0;
973 }
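/*
 * Wraparound example for wapbl_circ_write() (hypothetical log geometry):
 * with wl_circ_off == 1024 and wl_circ_size == 8192, writing len == 1024
 * bytes starting at *offp == 8704 gives slen == 1024 + 8192 - 8704 == 512,
 * so the first 512 bytes are written at offset 8704 and the remaining
 * 512 bytes wrap around to offset 1024, leaving *offp == 1536.
 */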
974
975 /****************************************************************/
976 /*
977 * WAPBL transactions: entering, adding/removing bufs, and exiting
978 */
979
980 int
981 wapbl_begin(struct wapbl *wl, const char *file, int line)
982 {
983 int doflush;
984 unsigned lockcount;
985
986 KDASSERT(wl);
987
988 /*
989 * XXX this needs to be made much more sophisticated.
990 * perhaps each wapbl_begin could reserve a specified
991 * number of buffers and bytes.
992 */
993 mutex_enter(&wl->wl_mtx);
994 lockcount = wl->wl_lock_count;
995 doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
996 wl->wl_bufbytes_max / 2) ||
997 ((wl->wl_bufcount + (lockcount * 10)) >
998 wl->wl_bufcount_max / 2) ||
999 (wapbl_transaction_len(wl) > wl->wl_circ_size / 2) ||
1000 (wl->wl_dealloccnt >= (wl->wl_dealloclim / 2));
1001 mutex_exit(&wl->wl_mtx);
1002
1003 if (doflush) {
1004 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1005 ("force flush lockcnt=%d bufbytes=%zu "
1006 "(max=%zu) bufcount=%zu (max=%zu) "
1007 "dealloccnt %d (lim=%d)\n",
1008 lockcount, wl->wl_bufbytes,
1009 wl->wl_bufbytes_max, wl->wl_bufcount,
1010 wl->wl_bufcount_max,
1011 wl->wl_dealloccnt, wl->wl_dealloclim));
1012 }
1013
1014 if (doflush) {
1015 int error = wapbl_flush(wl, 0);
1016 if (error)
1017 return error;
1018 }
1019
1020 rw_enter(&wl->wl_rwlock, RW_READER);
1021 mutex_enter(&wl->wl_mtx);
1022 wl->wl_lock_count++;
1023 mutex_exit(&wl->wl_mtx);
1024
1025 #if defined(WAPBL_DEBUG_PRINT)
1026 WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
1027 ("wapbl_begin thread %d.%d with bufcount=%zu "
1028 "bufbytes=%zu bcount=%zu at %s:%d\n",
1029 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1030 wl->wl_bufbytes, wl->wl_bcount, file, line));
1031 #endif
1032
1033 return 0;
1034 }
1035
1036 void
1037 wapbl_end(struct wapbl *wl)
1038 {
1039
1040 #if defined(WAPBL_DEBUG_PRINT)
1041 WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
1042 ("wapbl_end thread %d.%d with bufcount=%zu "
1043 "bufbytes=%zu bcount=%zu\n",
1044 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1045 wl->wl_bufbytes, wl->wl_bcount));
1046 #endif
1047
1048 /*
1049 * XXX this could be handled more gracefully, perhaps place
1050 * only a partial transaction in the log and allow the
1051 * remaining to flush without the protection of the journal.
1052 */
1053 KASSERTMSG((wapbl_transaction_len(wl) <=
1054 (wl->wl_circ_size - wl->wl_reserved_bytes)),
1055 "wapbl_end: current transaction too big to flush");
1056
1057 mutex_enter(&wl->wl_mtx);
1058 KASSERT(wl->wl_lock_count > 0);
1059 wl->wl_lock_count--;
1060 mutex_exit(&wl->wl_mtx);
1061
1062 rw_exit(&wl->wl_rwlock);
1063 }
1064
1065 void
1066 wapbl_add_buf(struct wapbl *wl, struct buf * bp)
1067 {
1068
1069 KASSERT(bp->b_cflags & BC_BUSY);
1070 KASSERT(bp->b_vp);
1071
1072 wapbl_jlock_assert(wl);
1073
1074 #if 0
1075 /*
1076 * XXX this might be an issue for swapfiles.
1077 * see uvm_swap.c:1702
1078 *
1079 * XXX2 why require it then? leap of semantics?
1080 */
1081 KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
1082 #endif
1083
1084 mutex_enter(&wl->wl_mtx);
1085 if (bp->b_flags & B_LOCKED) {
1086 LIST_REMOVE(bp, b_wapbllist);
1087 WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
1088 ("wapbl_add_buf thread %d.%d re-adding buf %p "
1089 "with %d bytes %d bcount\n",
1090 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
1091 bp->b_bcount));
1092 } else {
 1093 		/* unlocked but dirty buffers shouldn't exist */
1094 KASSERT(!(bp->b_oflags & BO_DELWRI));
1095 wl->wl_bufbytes += bp->b_bufsize;
1096 wl->wl_bcount += bp->b_bcount;
1097 wl->wl_bufcount++;
1098 WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
1099 ("wapbl_add_buf thread %d.%d adding buf %p "
1100 "with %d bytes %d bcount\n",
1101 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
1102 bp->b_bcount));
1103 }
1104 LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
1105 mutex_exit(&wl->wl_mtx);
1106
1107 bp->b_flags |= B_LOCKED;
1108 }
1109
1110 static void
1111 wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
1112 {
1113
1114 KASSERT(mutex_owned(&wl->wl_mtx));
1115 KASSERT(bp->b_cflags & BC_BUSY);
1116 wapbl_jlock_assert(wl);
1117
1118 #if 0
1119 /*
1120 * XXX this might be an issue for swapfiles.
1121 * see uvm_swap.c:1725
1122 *
1123 * XXXdeux: see above
1124 */
1125 KASSERT((bp->b_flags & BC_NOCACHE) == 0);
1126 #endif
1127 KASSERT(bp->b_flags & B_LOCKED);
1128
1129 WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
1130 ("wapbl_remove_buf thread %d.%d removing buf %p with "
1131 "%d bytes %d bcount\n",
1132 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));
1133
1134 KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
1135 wl->wl_bufbytes -= bp->b_bufsize;
1136 KASSERT(wl->wl_bcount >= bp->b_bcount);
1137 wl->wl_bcount -= bp->b_bcount;
1138 KASSERT(wl->wl_bufcount > 0);
1139 wl->wl_bufcount--;
1140 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1141 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1142 LIST_REMOVE(bp, b_wapbllist);
1143
1144 bp->b_flags &= ~B_LOCKED;
1145 }
1146
1147 /* called from brelsel() in vfs_bio among other places */
1148 void
1149 wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
1150 {
1151
1152 mutex_enter(&wl->wl_mtx);
1153 wapbl_remove_buf_locked(wl, bp);
1154 mutex_exit(&wl->wl_mtx);
1155 }
1156
1157 void
1158 wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
1159 {
1160
1161 KASSERT(bp->b_cflags & BC_BUSY);
1162
1163 /*
1164 * XXX: why does this depend on B_LOCKED? otherwise the buf
1165 * is not for a transaction? if so, why is this called in the
1166 * first place?
1167 */
1168 if (bp->b_flags & B_LOCKED) {
1169 mutex_enter(&wl->wl_mtx);
1170 wl->wl_bufbytes += bp->b_bufsize - oldsz;
1171 wl->wl_bcount += bp->b_bcount - oldcnt;
1172 mutex_exit(&wl->wl_mtx);
1173 }
1174 }
1175
1176 #endif /* _KERNEL */
1177
1178 /****************************************************************/
1179 /* Some utility inlines */
1180
1181 /*
1182 * wapbl_space_used(avail, head, tail)
1183 *
1184 * Number of bytes used in a circular queue of avail total bytes,
1185 * from tail to head.
1186 */
1187 static inline size_t
1188 wapbl_space_used(size_t avail, off_t head, off_t tail)
1189 {
1190
1191 if (tail == 0) {
1192 KASSERT(head == 0);
1193 return 0;
1194 }
1195 return ((head + (avail - 1) - tail) % avail) + 1;
1196 }
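/*
 * Example (arbitrary sizes for illustration): with avail == 1000,
 * tail == 100 and head == 300, the queue holds
 * ((300 + 999 - 100) % 1000) + 1 == 200 bytes.  With head == tail == 500
 * (both nonzero) the result is ((500 + 999 - 500) % 1000) + 1 == 1000,
 * i.e. the queue is completely full, matching the head == tail != 0
 * convention documented in struct wapbl.
 */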
1197
1198 #ifdef _KERNEL
1199 /*
1200 * wapbl_advance(size, off, oldoff, delta)
1201 *
1202 * Given a byte offset oldoff into a circular queue of size bytes
1203 * starting at off, return a new byte offset oldoff + delta into
1204 * the circular queue.
1205 */
1206 static inline off_t
1207 wapbl_advance(size_t size, size_t off, off_t oldoff, size_t delta)
1208 {
1209 off_t newoff;
1210
1211 /* Define acceptable ranges for inputs. */
1212 KASSERT(delta <= (size_t)size);
1213 KASSERT((oldoff == 0) || ((size_t)oldoff >= off));
1214 KASSERT(oldoff < (off_t)(size + off));
1215
1216 if ((oldoff == 0) && (delta != 0))
1217 newoff = off + delta;
1218 else if ((oldoff + delta) < (size + off))
1219 newoff = oldoff + delta;
1220 else
1221 newoff = (oldoff + delta) - size;
1222
1223 /* Note some interesting axioms */
1224 KASSERT((delta != 0) || (newoff == oldoff));
1225 KASSERT((delta == 0) || (newoff != 0));
1226 KASSERT((delta != (size)) || (newoff == oldoff));
1227
1228 /* Define acceptable ranges for output. */
1229 KASSERT((newoff == 0) || ((size_t)newoff >= off));
1230 KASSERT((size_t)newoff < (size + off));
1231 return newoff;
1232 }
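/*
 * Example (illustrative values): in a queue of size == 1000 starting at
 * off == 24, advancing oldoff == 900 by delta == 200 wraps around:
 * 900 + 200 == 1100 >= 1000 + 24, so newoff == 1100 - 1000 == 100.
 * Advancing from the empty position oldoff == 0 by delta == 50 yields
 * newoff == 24 + 50 == 74, skipping over the space reserved for the
 * commit headers.
 */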
1233
1234 /*
1235 * wapbl_space_free(avail, head, tail)
1236 *
1237 * Number of bytes free in a circular queue of avail total bytes,
1238 * in which everything from tail to head is used.
1239 */
1240 static inline size_t
1241 wapbl_space_free(size_t avail, off_t head, off_t tail)
1242 {
1243
1244 return avail - wapbl_space_used(avail, head, tail);
1245 }
1246
1247 /*
1248 * wapbl_advance_head(size, off, delta, headp, tailp)
1249 *
1250 * In a circular queue of size bytes starting at off, given the
1251 * old head and tail offsets *headp and *tailp, store the new head
1252 * and tail offsets in *headp and *tailp resulting from adding
1253 * delta bytes of data to the head.
1254 */
1255 static inline void
1256 wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
1257 off_t *tailp)
1258 {
1259 off_t head = *headp;
1260 off_t tail = *tailp;
1261
1262 KASSERT(delta <= wapbl_space_free(size, head, tail));
1263 head = wapbl_advance(size, off, head, delta);
1264 if ((tail == 0) && (head != 0))
1265 tail = off;
1266 *headp = head;
1267 *tailp = tail;
1268 }
1269
1270 /*
1271 * wapbl_advance_tail(size, off, delta, headp, tailp)
1272 *
1273 * In a circular queue of size bytes starting at off, given the
1274 * old head and tail offsets *headp and *tailp, store the new head
1275 * and tail offsets in *headp and *tailp resulting from removing
1276 * delta bytes of data from the tail.
1277 */
1278 static inline void
1279 wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
1280 off_t *tailp)
1281 {
1282 off_t head = *headp;
1283 off_t tail = *tailp;
1284
1285 KASSERT(delta <= wapbl_space_used(size, head, tail));
1286 tail = wapbl_advance(size, off, tail, delta);
1287 if (head == tail) {
1288 head = tail = 0;
1289 }
1290 *headp = head;
1291 *tailp = tail;
1292 }
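/*
 * Example of the empty/nonempty transitions (illustrative values):
 * starting empty (head == tail == 0) in a queue of size == 1000 at
 * off == 24, wapbl_advance_head() with delta == 100 gives head == 124
 * and sets tail == 24, making the queue nonempty; a subsequent
 * wapbl_advance_tail() with delta == 100 makes head == tail, which is
 * then reset to head == tail == 0, i.e. empty again.
 */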
1293
1294
1295 /****************************************************************/
1296
1297 /*
1298 * wapbl_truncate(wl, minfree, waitonly)
1299 *
1300 * Wait until at least minfree bytes are available in the log.
1301 *
1302 * If it was necessary to wait for writes to complete, and if
1303 * waitonly is not true, advance the circular queue tail to
1304 * reflect the new write completions and issue a write commit to
1305 * the log.
1306 *
1307 * => Caller must hold wl->wl_rwlock writer lock.
1308 */
1309 static int
1310 wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
1311 {
1312 size_t delta;
1313 size_t avail;
1314 off_t head;
1315 off_t tail;
1316 int error = 0;
1317
1318 KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
1319 KASSERT(rw_write_held(&wl->wl_rwlock));
1320
1321 mutex_enter(&wl->wl_mtx);
1322
1323 /*
1324 * First check to see if we have to do a commit
1325 * at all.
1326 */
1327 avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
1328 if (minfree < avail) {
1329 mutex_exit(&wl->wl_mtx);
1330 return 0;
1331 }
1332 minfree -= avail;
1333 while ((wl->wl_error_count == 0) &&
1334 (wl->wl_reclaimable_bytes < minfree)) {
1335 WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1336 ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
1337 "minfree=%zd\n",
1338 &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
1339 minfree));
1340
1341 cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
1342 }
1343 if (wl->wl_reclaimable_bytes < minfree) {
1344 KASSERT(wl->wl_error_count);
1345 /* XXX maybe get actual error from buffer instead someday? */
1346 error = EIO;
1347 }
1348 head = wl->wl_head;
1349 tail = wl->wl_tail;
1350 delta = wl->wl_reclaimable_bytes;
1351
 1352 	/* If all of the entries are flushed, then be sure to keep
1353 * the reserved bytes reserved. Watch out for discarded transactions,
1354 * which could leave more bytes reserved than are reclaimable.
1355 */
1356 if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
1357 (delta >= wl->wl_reserved_bytes)) {
1358 delta -= wl->wl_reserved_bytes;
1359 }
1360 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
1361 &tail);
1362 KDASSERT(wl->wl_reserved_bytes <=
1363 wapbl_space_used(wl->wl_circ_size, head, tail));
1364 mutex_exit(&wl->wl_mtx);
1365
1366 if (error)
1367 return error;
1368
1369 if (waitonly)
1370 return 0;
1371
1372 /*
1373 * This is where head, tail and delta are unprotected
1374 * from races against itself or flush. This is ok since
1375 * we only call this routine from inside flush itself.
1376 *
1377 * XXX: how can it race against itself when accessed only
1378 * from behind the write-locked rwlock?
1379 */
1380 error = wapbl_write_commit(wl, head, tail);
1381 if (error)
1382 return error;
1383
1384 wl->wl_head = head;
1385 wl->wl_tail = tail;
1386
1387 mutex_enter(&wl->wl_mtx);
1388 KASSERT(wl->wl_reclaimable_bytes >= delta);
1389 wl->wl_reclaimable_bytes -= delta;
1390 mutex_exit(&wl->wl_mtx);
1391 WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1392 ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
1393 curproc->p_pid, curlwp->l_lid, delta));
1394
1395 return 0;
1396 }
1397
1398 /****************************************************************/
1399
1400 void
1401 wapbl_biodone(struct buf *bp)
1402 {
1403 struct wapbl_entry *we = bp->b_private;
1404 struct wapbl *wl = we->we_wapbl;
1405 #ifdef WAPBL_DEBUG_BUFBYTES
1406 const int bufsize = bp->b_bufsize;
1407 #endif
1408
1409 /*
 1410 	 * Handle possible flushing of buffers after the log has been
 1411 	 * decommissioned.
1412 */
1413 if (!wl) {
1414 KASSERT(we->we_bufcount > 0);
1415 we->we_bufcount--;
1416 #ifdef WAPBL_DEBUG_BUFBYTES
1417 KASSERT(we->we_unsynced_bufbytes >= bufsize);
1418 we->we_unsynced_bufbytes -= bufsize;
1419 #endif
1420
1421 if (we->we_bufcount == 0) {
1422 #ifdef WAPBL_DEBUG_BUFBYTES
1423 KASSERT(we->we_unsynced_bufbytes == 0);
1424 #endif
1425 pool_put(&wapbl_entry_pool, we);
1426 }
1427
1428 brelse(bp, 0);
1429 return;
1430 }
1431
1432 #ifdef ohbother
1433 KDASSERT(bp->b_oflags & BO_DONE);
1434 KDASSERT(!(bp->b_oflags & BO_DELWRI));
1435 KDASSERT(bp->b_flags & B_ASYNC);
1436 KDASSERT(bp->b_cflags & BC_BUSY);
1437 KDASSERT(!(bp->b_flags & B_LOCKED));
1438 KDASSERT(!(bp->b_flags & B_READ));
1439 KDASSERT(!(bp->b_cflags & BC_INVAL));
1440 KDASSERT(!(bp->b_cflags & BC_NOCACHE));
1441 #endif
1442
1443 if (bp->b_error) {
1444 #ifdef notyet /* Can't currently handle possible dirty buffer reuse */
1445 /*
1446 * XXXpooka: interfaces not fully updated
1447 * Note: this was not enabled in the original patch
1448 * against netbsd4 either. I don't know if comment
1449 * above is true or not.
1450 */
1451
1452 /*
1453 * If an error occurs, report the error and leave the
1454 * buffer as a delayed write on the LRU queue.
 1455 		 * Restarting the write would likely result in
1456 * an error spinloop, so let it be done harmlessly
1457 * by the syncer.
1458 */
1459 bp->b_flags &= ~(B_DONE);
1460 simple_unlock(&bp->b_interlock);
1461
1462 if (we->we_error == 0) {
1463 mutex_enter(&wl->wl_mtx);
1464 wl->wl_error_count++;
1465 mutex_exit(&wl->wl_mtx);
1466 cv_broadcast(&wl->wl_reclaimable_cv);
1467 }
1468 we->we_error = bp->b_error;
1469 bp->b_error = 0;
1470 brelse(bp);
1471 return;
1472 #else
1473 /* For now, just mark the log permanently errored out */
1474
1475 mutex_enter(&wl->wl_mtx);
1476 if (wl->wl_error_count == 0) {
1477 wl->wl_error_count++;
1478 cv_broadcast(&wl->wl_reclaimable_cv);
1479 }
1480 mutex_exit(&wl->wl_mtx);
1481 #endif
1482 }
1483
1484 /*
1485 * Release the buffer here. wapbl_flush() may wait for the
1486 * log to become empty and we better unbusy the buffer before
1487 * wapbl_flush() returns.
1488 */
1489 brelse(bp, 0);
1490
1491 mutex_enter(&wl->wl_mtx);
1492
1493 KASSERT(we->we_bufcount > 0);
1494 we->we_bufcount--;
1495 #ifdef WAPBL_DEBUG_BUFBYTES
1496 KASSERT(we->we_unsynced_bufbytes >= bufsize);
1497 we->we_unsynced_bufbytes -= bufsize;
1498 KASSERT(wl->wl_unsynced_bufbytes >= bufsize);
1499 wl->wl_unsynced_bufbytes -= bufsize;
1500 #endif
1501
1502 /*
1503 * If the current transaction can be reclaimed, start
1504 * at the beginning and reclaim any consecutive reclaimable
1505 * transactions. If we successfully reclaim anything,
1506 * then wakeup anyone waiting for the reclaim.
1507 */
1508 if (we->we_bufcount == 0) {
1509 size_t delta = 0;
1510 int errcnt = 0;
1511 #ifdef WAPBL_DEBUG_BUFBYTES
1512 KDASSERT(we->we_unsynced_bufbytes == 0);
1513 #endif
1514 /*
 1515 		 * Clear any posted error, since the buffer it came from
 1516 		 * has been successfully flushed by now.
1517 */
1518 while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
1519 (we->we_bufcount == 0)) {
1520 delta += we->we_reclaimable_bytes;
1521 if (we->we_error)
1522 errcnt++;
1523 SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
1524 pool_put(&wapbl_entry_pool, we);
1525 }
1526
1527 if (delta) {
1528 wl->wl_reclaimable_bytes += delta;
1529 KASSERT(wl->wl_error_count >= errcnt);
1530 wl->wl_error_count -= errcnt;
1531 cv_broadcast(&wl->wl_reclaimable_cv);
1532 }
1533 }
1534
1535 mutex_exit(&wl->wl_mtx);
1536 }
1537
1538 /*
1539 * wapbl_flush(wl, wait)
1540 *
1541 * Flush pending block writes, deallocations, and inodes from
1542 * the current transaction in memory to the log on disk:
1543 *
1544 * 1. Call the file system's wl_flush callback to flush any
1545 * per-file-system pending updates.
1546 * 2. Wait for enough space in the log for the current transaction.
1547 * 3. Synchronously write the new log records, advancing the
1548 * circular queue head.
1549 * 4. If wait is true, also wait for all the logged writes to
1550 * complete so that the log is empty on return.
1551 *
1552 * On failure, call the file system's wl_flush_abort callback.
1553 */
1554 int
1555 wapbl_flush(struct wapbl *wl, int waitfor)
1556 {
1557 struct buf *bp;
1558 struct wapbl_entry *we;
1559 off_t off;
1560 off_t head;
1561 off_t tail;
1562 size_t delta = 0;
1563 size_t flushsize;
1564 size_t reserved;
1565 int error = 0;
1566
1567 /*
 1568 	 * Do a quick check to see if a full flush can be skipped.
1569 * This assumes that the flush callback does not need to be called
1570 * unless there are other outstanding bufs.
1571 */
1572 if (!waitfor) {
1573 size_t nbufs;
1574 mutex_enter(&wl->wl_mtx); /* XXX need mutex here to
1575 protect the KASSERTS */
1576 nbufs = wl->wl_bufcount;
1577 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1578 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1579 mutex_exit(&wl->wl_mtx);
1580 if (nbufs == 0)
1581 return 0;
1582 }
1583
1584 /*
1585 * XXX we may consider using LK_UPGRADE here
1586 * if we want to call flush from inside a transaction
1587 */
1588 rw_enter(&wl->wl_rwlock, RW_WRITER);
1589 wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
1590 wl->wl_dealloccnt);
1591
1592 /*
1593 * Now that we are fully locked and flushed,
1594 * do another check for nothing to do.
1595 */
1596 if (wl->wl_bufcount == 0) {
1597 goto wait_out;
1598 }
1599
1600 #if 0
1601 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1602 ("wapbl_flush thread %d.%d flushing entries with "
1603 "bufcount=%zu bufbytes=%zu\n",
1604 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1605 wl->wl_bufbytes));
1606 #endif
1607
1608 /* Calculate amount of space needed to flush */
1609 flushsize = wapbl_transaction_len(wl);
1610 if (wapbl_verbose_commit) {
1611 struct timespec ts;
1612 getnanotime(&ts);
1613 printf("%s: %lld.%09ld this transaction = %zu bytes\n",
1614 __func__, (long long)ts.tv_sec,
1615 (long)ts.tv_nsec, flushsize);
1616 }
1617
1618 if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1619 /*
1620 * XXX this could be handled more gracefully, perhaps place
1621 * only a partial transaction in the log and allow the
1622 * remaining to flush without the protection of the journal.
1623 */
1624 panic("wapbl_flush: current transaction too big to flush");
1625 }
1626
1627 error = wapbl_truncate(wl, flushsize, 0);
1628 if (error)
1629 goto out;
1630
1631 off = wl->wl_head;
1632 KASSERT((off == 0) || (off >= wl->wl_circ_off));
1633 KASSERT((off == 0) || (off < wl->wl_circ_off + wl->wl_circ_size));
1634 error = wapbl_write_blocks(wl, &off);
1635 if (error)
1636 goto out;
1637 error = wapbl_write_revocations(wl, &off);
1638 if (error)
1639 goto out;
1640 error = wapbl_write_inodes(wl, &off);
1641 if (error)
1642 goto out;
1643
1644 reserved = 0;
1645 if (wl->wl_inohashcnt)
1646 reserved = wapbl_transaction_inodes_len(wl);
1647
1648 head = wl->wl_head;
1649 tail = wl->wl_tail;
1650
1651 wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1652 &head, &tail);
1653 #ifdef WAPBL_DEBUG
1654 if (head != off) {
1655 panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1656 " off=%"PRIdMAX" flush=%zu",
1657 (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1658 flushsize);
1659 }
1660 #else
1661 KASSERT(head == off);
1662 #endif
1663
1664 /* Opportunistically move the tail forward if we can */
1665 if (!wapbl_lazy_truncate) {
1666 mutex_enter(&wl->wl_mtx);
1667 delta = wl->wl_reclaimable_bytes;
1668 mutex_exit(&wl->wl_mtx);
1669 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1670 &head, &tail);
1671 }
1672
1673 error = wapbl_write_commit(wl, head, tail);
1674 if (error)
1675 goto out;
1676
1677 we = pool_get(&wapbl_entry_pool, PR_WAITOK);
1678
1679 #ifdef WAPBL_DEBUG_BUFBYTES
1680 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1681 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1682 " unsynced=%zu"
1683 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1684 "inodes=%d\n",
1685 curproc->p_pid, curlwp->l_lid, flushsize, delta,
1686 wapbl_space_used(wl->wl_circ_size, head, tail),
1687 wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1688 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1689 wl->wl_inohashcnt));
1690 #else
1691 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1692 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1693 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1694 "inodes=%d\n",
1695 curproc->p_pid, curlwp->l_lid, flushsize, delta,
1696 wapbl_space_used(wl->wl_circ_size, head, tail),
1697 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1698 wl->wl_dealloccnt, wl->wl_inohashcnt));
1699 #endif
1700
1701
1702 mutex_enter(&bufcache_lock);
1703 mutex_enter(&wl->wl_mtx);
1704
1705 wl->wl_reserved_bytes = reserved;
1706 wl->wl_head = head;
1707 wl->wl_tail = tail;
1708 KASSERT(wl->wl_reclaimable_bytes >= delta);
1709 wl->wl_reclaimable_bytes -= delta;
1710 wl->wl_dealloccnt = 0;
1711 #ifdef WAPBL_DEBUG_BUFBYTES
1712 wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1713 #endif
1714
1715 we->we_wapbl = wl;
1716 we->we_bufcount = wl->wl_bufcount;
1717 #ifdef WAPBL_DEBUG_BUFBYTES
1718 we->we_unsynced_bufbytes = wl->wl_bufbytes;
1719 #endif
1720 we->we_reclaimable_bytes = flushsize;
1721 we->we_error = 0;
1722 SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1723
1724 /*
 1725 	 * This flushes bufs in the reverse order from that in which they were
 1726 	 * queued; it shouldn't matter, but if we care we could use TAILQ instead.
1727 * XXX Note they will get put on the lru queue when they flush
1728 * so we might actually want to change this to preserve order.
1729 */
1730 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
1731 if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1732 continue;
1733 }
1734 bp->b_iodone = wapbl_biodone;
1735 bp->b_private = we;
1736 bremfree(bp);
1737 wapbl_remove_buf_locked(wl, bp);
1738 mutex_exit(&wl->wl_mtx);
1739 mutex_exit(&bufcache_lock);
1740 bawrite(bp);
1741 mutex_enter(&bufcache_lock);
1742 mutex_enter(&wl->wl_mtx);
1743 }
1744 mutex_exit(&wl->wl_mtx);
1745 mutex_exit(&bufcache_lock);
1746
1747 #if 0
1748 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1749 ("wapbl_flush thread %d.%d done flushing entries...\n",
1750 curproc->p_pid, curlwp->l_lid));
1751 #endif
1752
1753 wait_out:
1754
1755 /*
1756 * If the waitfor flag is set, don't return until everything is
1757 * fully flushed and the on disk log is empty.
1758 */
1759 if (waitfor) {
1760 error = wapbl_truncate(wl, wl->wl_circ_size -
1761 wl->wl_reserved_bytes, wapbl_lazy_truncate);
1762 }
1763
1764 out:
1765 if (error) {
1766 wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
1767 wl->wl_dealloclens, wl->wl_dealloccnt);
1768 }
1769
1770 #ifdef WAPBL_DEBUG_PRINT
1771 if (error) {
1772 pid_t pid = -1;
1773 lwpid_t lid = -1;
1774 if (curproc)
1775 pid = curproc->p_pid;
1776 if (curlwp)
1777 lid = curlwp->l_lid;
1778 mutex_enter(&wl->wl_mtx);
1779 #ifdef WAPBL_DEBUG_BUFBYTES
1780 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1781 ("wapbl_flush: thread %d.%d aborted flush: "
1782 "error = %d\n"
1783 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1784 "deallocs=%d inodes=%d\n"
1785 "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1786 "unsynced=%zu\n",
1787 pid, lid, error, wl->wl_bufcount,
1788 wl->wl_bufbytes, wl->wl_bcount,
1789 wl->wl_dealloccnt, wl->wl_inohashcnt,
1790 wl->wl_error_count, wl->wl_reclaimable_bytes,
1791 wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1792 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1793 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1794 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1795 "error = %d, unsynced = %zu\n",
1796 we->we_bufcount, we->we_reclaimable_bytes,
1797 we->we_error, we->we_unsynced_bufbytes));
1798 }
1799 #else
1800 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1801 ("wapbl_flush: thread %d.%d aborted flush: "
1802 "error = %d\n"
1803 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1804 "deallocs=%d inodes=%d\n"
1805 "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
1806 pid, lid, error, wl->wl_bufcount,
1807 wl->wl_bufbytes, wl->wl_bcount,
1808 wl->wl_dealloccnt, wl->wl_inohashcnt,
1809 wl->wl_error_count, wl->wl_reclaimable_bytes,
1810 wl->wl_reserved_bytes));
1811 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1812 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1813 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1814 "error = %d\n", we->we_bufcount,
1815 we->we_reclaimable_bytes, we->we_error));
1816 }
1817 #endif
1818 mutex_exit(&wl->wl_mtx);
1819 }
1820 #endif
1821
1822 rw_exit(&wl->wl_rwlock);
1823 return error;
1824 }
1825
1826 /****************************************************************/
1827
1828 void
1829 wapbl_jlock_assert(struct wapbl *wl)
1830 {
1831
1832 KASSERT(rw_lock_held(&wl->wl_rwlock));
1833 }
1834
1835 void
1836 wapbl_junlock_assert(struct wapbl *wl)
1837 {
1838
1839 KASSERT(!rw_write_held(&wl->wl_rwlock));
1840 }
1841
1842 /****************************************************************/
1843
1844 /* locks missing */
1845 void
1846 wapbl_print(struct wapbl *wl,
1847 int full,
1848 void (*pr)(const char *, ...))
1849 {
1850 struct buf *bp;
1851 struct wapbl_entry *we;
1852 (*pr)("wapbl %p", wl);
1853 (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
1854 wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
1855 (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
1856 wl->wl_circ_size, wl->wl_circ_off,
1857 (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
1858 (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
1859 wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
1860 #ifdef WAPBL_DEBUG_BUFBYTES
1861 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1862 "reserved = %zu errcnt = %d unsynced = %zu\n",
1863 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1864 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1865 wl->wl_error_count, wl->wl_unsynced_bufbytes);
1866 #else
1867 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1868 "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
1869 wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1870 wl->wl_error_count);
1871 #endif
1872 (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
1873 wl->wl_dealloccnt, wl->wl_dealloclim);
1874 (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
1875 wl->wl_inohashcnt, wl->wl_inohashmask);
1876 (*pr)("entries:\n");
1877 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1878 #ifdef WAPBL_DEBUG_BUFBYTES
1879 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
1880 "unsynced = %zu\n",
1881 we->we_bufcount, we->we_reclaimable_bytes,
1882 we->we_error, we->we_unsynced_bufbytes);
1883 #else
1884 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
1885 we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
1886 #endif
1887 }
1888 if (full) {
1889 int cnt = 0;
1890 (*pr)("bufs =");
1891 LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
1892 if (!LIST_NEXT(bp, b_wapbllist)) {
1893 (*pr)(" %p", bp);
1894 } else if ((++cnt % 6) == 0) {
1895 (*pr)(" %p,\n\t", bp);
1896 } else {
1897 (*pr)(" %p,", bp);
1898 }
1899 }
1900 (*pr)("\n");
1901
1902 (*pr)("dealloced blks = ");
1903 {
1904 int i;
1905 cnt = 0;
1906 for (i = 0; i < wl->wl_dealloccnt; i++) {
1907 (*pr)(" %"PRId64":%d,",
1908 wl->wl_deallocblks[i],
1909 wl->wl_dealloclens[i]);
1910 if ((++cnt % 4) == 0) {
1911 (*pr)("\n\t");
1912 }
1913 }
1914 }
1915 (*pr)("\n");
1916
1917 (*pr)("registered inodes = ");
1918 {
1919 int i;
1920 cnt = 0;
1921 for (i = 0; i <= wl->wl_inohashmask; i++) {
1922 struct wapbl_ino_head *wih;
1923 struct wapbl_ino *wi;
1924
1925 wih = &wl->wl_inohash[i];
1926 LIST_FOREACH(wi, wih, wi_hash) {
1927 if (wi->wi_ino == 0)
1928 continue;
1929 (*pr)(" %"PRIu64"/0%06"PRIo32",",
1930 wi->wi_ino, wi->wi_mode);
1931 if ((++cnt % 4) == 0) {
1932 (*pr)("\n\t");
1933 }
1934 }
1935 }
1936 (*pr)("\n");
1937 }
1938 }
1939 }
1940
1941 #if defined(WAPBL_DEBUG) || defined(DDB)
1942 void
1943 wapbl_dump(struct wapbl *wl)
1944 {
1945 #if defined(WAPBL_DEBUG)
1946 if (!wl)
1947 wl = wapbl_debug_wl;
1948 #endif
1949 if (!wl)
1950 return;
1951 wapbl_print(wl, 1, printf);
1952 }
1953 #endif
1954
1955 /****************************************************************/
1956
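/*
 * wapbl_register_deallocation(wl, blk, len)
 *
 *	Record a pending deallocation of len bytes at disk address
 *	blk so that it is written to the log as a revocation record
 *	by the next flush.  Panics if the per-transaction limit
 *	wl_dealloclim is exceeded.
 */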
1957 void
1958 wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
1959 {
1960
1961 wapbl_jlock_assert(wl);
1962
1963 mutex_enter(&wl->wl_mtx);
1964 /* XXX should eventually instead tie this into resource estimation */
1965 /*
1966 * XXX this panic needs locking/mutex analysis and the
1967 * ability to cope with the failure.
1968 */
1969 /* XXX this XXX doesn't have enough XXX */
1970 if (__predict_false(wl->wl_dealloccnt >= wl->wl_dealloclim))
1971 panic("wapbl_register_deallocation: out of resources");
1972
1973 wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
1974 wl->wl_dealloclens[wl->wl_dealloccnt] = len;
1975 wl->wl_dealloccnt++;
1976 WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
1977 ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
1978 mutex_exit(&wl->wl_mtx);
1979 }
1980
1981 /****************************************************************/
1982
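/*
 * wapbl_inodetrk_init(wl, size)
 *
 *	Initialize the inode tracking hash table of wl, sized for
 *	about size entries, and create the shared wapbl_ino pool on
 *	first use.
 */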
1983 static void
1984 wapbl_inodetrk_init(struct wapbl *wl, u_int size)
1985 {
1986
1987 wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
1988 if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
1989 pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
1990 "wapblinopl", &pool_allocator_nointr, IPL_NONE);
1991 }
1992 }
1993
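/*
 * wapbl_inodetrk_free(wl)
 *
 *	Free the inode tracking hash table of wl, which must be
 *	empty, and destroy the shared wapbl_ino pool when the last
 *	user goes away.
 */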
1994 static void
1995 wapbl_inodetrk_free(struct wapbl *wl)
1996 {
1997
1998 /* XXX this KASSERT needs locking/mutex analysis */
1999 KASSERT(wl->wl_inohashcnt == 0);
2000 hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
2001 if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
2002 pool_destroy(&wapbl_ino_pool);
2003 }
2004 }
2005
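/*
 * wapbl_inodetrk_get(wl, ino)
 *
 *	Return the tracking entry for inode number ino in wl, or
 *	NULL if it is not registered.  Caller must hold wl->wl_mtx.
 */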
2006 static struct wapbl_ino *
2007 wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
2008 {
2009 struct wapbl_ino_head *wih;
2010 struct wapbl_ino *wi;
2011
2012 KASSERT(mutex_owned(&wl->wl_mtx));
2013
2014 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2015 LIST_FOREACH(wi, wih, wi_hash) {
2016 if (ino == wi->wi_ino)
2017 return wi;
2018 }
2019 return 0;
2020 }
2021
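/*
 * wapbl_register_inode(wl, ino, mode)
 *
 *	Register the allocated inode with number ino and mode mode
 *	so that it is recorded in the log by the next flush.  Does
 *	nothing if the inode is already registered.
 */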
2022 void
2023 wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2024 {
2025 struct wapbl_ino_head *wih;
2026 struct wapbl_ino *wi;
2027
2028 wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
2029
2030 mutex_enter(&wl->wl_mtx);
2031 if (wapbl_inodetrk_get(wl, ino) == NULL) {
2032 wi->wi_ino = ino;
2033 wi->wi_mode = mode;
2034 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2035 LIST_INSERT_HEAD(wih, wi, wi_hash);
2036 wl->wl_inohashcnt++;
2037 WAPBL_PRINTF(WAPBL_PRINT_INODE,
2038 ("wapbl_register_inode: ino=%"PRId64"\n", ino));
2039 mutex_exit(&wl->wl_mtx);
2040 } else {
2041 mutex_exit(&wl->wl_mtx);
2042 pool_put(&wapbl_ino_pool, wi);
2043 }
2044 }
2045
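/*
 * wapbl_unregister_inode(wl, ino, mode)
 *
 *	Undo a previous wapbl_register_inode for inode number ino.
 *	Does nothing if the inode is not currently registered.
 */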
2046 void
2047 wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2048 {
2049 struct wapbl_ino *wi;
2050
2051 mutex_enter(&wl->wl_mtx);
2052 wi = wapbl_inodetrk_get(wl, ino);
2053 if (wi) {
2054 WAPBL_PRINTF(WAPBL_PRINT_INODE,
2055 ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
2056 KASSERT(wl->wl_inohashcnt > 0);
2057 wl->wl_inohashcnt--;
2058 LIST_REMOVE(wi, wi_hash);
2059 mutex_exit(&wl->wl_mtx);
2060
2061 pool_put(&wapbl_ino_pool, wi);
2062 } else {
2063 mutex_exit(&wl->wl_mtx);
2064 }
2065 }
2066
2067 /****************************************************************/
2068
2069 /*
2070 * wapbl_transaction_inodes_len(wl)
2071 *
2072 * Calculate the number of bytes required for inode registration
2073 * log records in wl.
2074 */
2075 static inline size_t
2076 wapbl_transaction_inodes_len(struct wapbl *wl)
2077 {
2078 int blocklen = 1<<wl->wl_log_dev_bshift;
2079 int iph;
2080
2081 	/* Calculate number of inodes described in an inodelist header */
2082 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2083 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2084
2085 KASSERT(iph > 0);
2086
2087 return MAX(1, howmany(wl->wl_inohashcnt, iph)) * blocklen;
2088 }
2089
2090
2091 /*
2092 * wapbl_transaction_len(wl)
2093 *
2094 * Calculate number of bytes required for all log records in wl.
2095 */
2096 static size_t
2097 wapbl_transaction_len(struct wapbl *wl)
2098 {
2099 int blocklen = 1<<wl->wl_log_dev_bshift;
2100 size_t len;
2101 int bph;
2102
2103 /* Calculate number of blocks described in a blocklist header */
2104 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
2105 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
2106
2107 KASSERT(bph > 0);
2108
2109 len = wl->wl_bcount;
2110 len += howmany(wl->wl_bufcount, bph) * blocklen;
2111 len += howmany(wl->wl_dealloccnt, bph) * blocklen;
2112 len += wapbl_transaction_inodes_len(wl);
2113
2114 return len;
2115 }
2116
2117 /*
2118 * wapbl_cache_sync(wl, msg)
2119 *
2120 * Issue DIOCCACHESYNC to wl->wl_devvp.
2121 *
2122 * If sysctl(vfs.wapbl.verbose_commit) >= 2, print a message
2123 * including msg about the duration of the cache sync.
2124 */
2125 static int
2126 wapbl_cache_sync(struct wapbl *wl, const char *msg)
2127 {
2128 const bool verbose = wapbl_verbose_commit >= 2;
2129 struct bintime start_time;
2130 int force = 1;
2131 int error;
2132
2133 if (!wapbl_flush_disk_cache) {
2134 return 0;
2135 }
2136 if (verbose) {
2137 bintime(&start_time);
2138 }
2139 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force,
2140 FWRITE, FSCRED);
2141 if (error) {
2142 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2143 		    ("wapbl_cache_sync: DIOCCACHESYNC on dev 0x%jx "
2144 		    "returned %d\n", (uintmax_t)wl->wl_devvp->v_rdev, error));
2145 }
2146 if (verbose) {
2147 struct bintime d;
2148 struct timespec ts;
2149
2150 bintime(&d);
2151 bintime_sub(&d, &start_time);
2152 bintime2timespec(&d, &ts);
2153 printf("wapbl_cache_sync: %s: dev 0x%jx %ju.%09lu\n",
2154 msg, (uintmax_t)wl->wl_devvp->v_rdev,
2155 (uintmax_t)ts.tv_sec, ts.tv_nsec);
2156 }
2157 return error;
2158 }
2159
2160 /*
2161 * wapbl_write_commit(wl, head, tail)
2162 *
2163 * Issue a disk cache sync to wait for all pending writes to the
2164 * log to complete, and then synchronously commit the current
2165 * circular queue head and tail to the log, in the next of two
2166 * locations for commit headers on disk.
2167 *
2168 * Increment the generation number. If the generation number
2169 * rolls over to zero, then a subsequent commit would appear to
2170 * have an older generation than this one -- in that case, issue a
2171 * duplicate commit to avoid this.
2172 *
2173 * => Caller must have exclusive access to wl, either by holding
2174 * wl->wl_rwlock for writer or by being wapbl_start before anyone
2175 * else has seen wl.
2176 */
2177 static int
2178 wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
2179 {
2180 struct wapbl_wc_header *wc = wl->wl_wc_header;
2181 struct timespec ts;
2182 int error;
2183 daddr_t pbn;
2184
2185 error = wapbl_buffered_flush(wl);
2186 if (error)
2187 return error;
2188 	/*
2189 	 * Flush the disk cache to ensure that the blocks we've written
2190 	 * are actually on stable storage before the commit header.
2191 	 *
2192 	 * XXX We should calculate a checksum here; instead we do this for now.
2193 	 */
2194 wapbl_cache_sync(wl, "1");
2195
2196 wc->wc_head = head;
2197 wc->wc_tail = tail;
2198 wc->wc_checksum = 0;
2199 wc->wc_version = 1;
2200 getnanotime(&ts);
2201 wc->wc_time = ts.tv_sec;
2202 wc->wc_timensec = ts.tv_nsec;
2203
2204 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2205 	    ("wapbl_write_commit: head = %"PRIdMAX" tail = %"PRIdMAX"\n",
2206 (intmax_t)head, (intmax_t)tail));
2207
2208 /*
2209 	 * Write the commit header.
2210 	 *
2211 	 * XXX If the generation number will roll over, first zero out the
2212 	 * second commit header before trying to write both headers.
2213 */
2214
2215 pbn = wl->wl_logpbn + (wc->wc_generation % 2);
2216 #ifdef _KERNEL
2217 pbn = btodb(pbn << wc->wc_log_dev_bshift);
2218 #endif
2219 error = wapbl_buffered_write(wc, wc->wc_len, wl, pbn);
2220 if (error)
2221 return error;
2222 error = wapbl_buffered_flush(wl);
2223 if (error)
2224 return error;
2225
2226 	/*
2227 	 * Flush the disk cache to ensure that the commit header is actually
2228 	 * written before the metadata blocks.
2229 	 */
2230 wapbl_cache_sync(wl, "2");
2231
2232 /*
2233 	 * If the generation number was zero, write it out a second time.
2234 	 * This handles initialization and generation number rollover.
2235 */
2236 if (wc->wc_generation++ == 0) {
2237 error = wapbl_write_commit(wl, head, tail);
2238 		/*
2239 		 * This panic could be removed if we did the zeroing
2240 		 * mentioned above and were certain to roll back the
2241 		 * generation number on failure.
2242 */
2243 if (error)
2244 panic("wapbl_write_commit: error writing duplicate "
2245 "log header: %d", error);
2246 }
2247 return 0;
2248 }
2249
2250 /*
2251 * wapbl_write_blocks(wl, offp)
2252 *
2253 * Write all pending physical blocks in the current transaction
2254 * from wapbl_add_buf to the log on disk, adding to the circular
2255 * queue head at byte offset *offp, and returning the new head's
2256 * byte offset in *offp.
2257 */
2258 static int
2259 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
2260 {
2261 struct wapbl_wc_blocklist *wc =
2262 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2263 int blocklen = 1<<wl->wl_log_dev_bshift;
2264 int bph;
2265 struct buf *bp;
2266 off_t off = *offp;
2267 int error;
2268 size_t padding;
2269
2270 KASSERT(rw_write_held(&wl->wl_rwlock));
2271
2272 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
2273 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
2274
2275 bp = LIST_FIRST(&wl->wl_bufs);
2276
2277 while (bp) {
2278 int cnt;
2279 struct buf *obp = bp;
2280
2281 KASSERT(bp->b_flags & B_LOCKED);
2282
2283 wc->wc_type = WAPBL_WC_BLOCKS;
2284 wc->wc_len = blocklen;
2285 wc->wc_blkcount = 0;
2286 while (bp && (wc->wc_blkcount < bph)) {
2287 /*
2288 * Make sure all the physical block numbers are up to
2289 * date. If this is not always true on a given
2290 * filesystem, then VOP_BMAP must be called. We
2291 * could call VOP_BMAP here, or else in the filesystem
2292 * specific flush callback, although neither of those
2293 * solutions allow us to take the vnode lock. If a
2294 * filesystem requires that we must take the vnode lock
2295 * to call VOP_BMAP, then we can probably do it in
2296 * bwrite when the vnode lock should already be held
2297 * by the invoking code.
2298 */
2299 KASSERT((bp->b_vp->v_type == VBLK) ||
2300 (bp->b_blkno != bp->b_lblkno));
2301 KASSERT(bp->b_blkno > 0);
2302
2303 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
2304 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
2305 wc->wc_len += bp->b_bcount;
2306 wc->wc_blkcount++;
2307 bp = LIST_NEXT(bp, b_wapbllist);
2308 }
2309 if (wc->wc_len % blocklen != 0) {
2310 padding = blocklen - wc->wc_len % blocklen;
2311 wc->wc_len += padding;
2312 } else {
2313 padding = 0;
2314 }
2315
2316 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2317 ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
2318 wc->wc_len, padding, (intmax_t)off));
2319
2320 error = wapbl_circ_write(wl, wc, blocklen, &off);
2321 if (error)
2322 return error;
2323 bp = obp;
2324 cnt = 0;
2325 while (bp && (cnt++ < bph)) {
2326 error = wapbl_circ_write(wl, bp->b_data,
2327 bp->b_bcount, &off);
2328 if (error)
2329 return error;
2330 bp = LIST_NEXT(bp, b_wapbllist);
2331 }
2332 if (padding) {
2333 void *zero;
2334
2335 zero = wapbl_alloc(padding);
2336 memset(zero, 0, padding);
2337 error = wapbl_circ_write(wl, zero, padding, &off);
2338 wapbl_free(zero, padding);
2339 if (error)
2340 return error;
2341 }
2342 }
2343 *offp = off;
2344 return 0;
2345 }
2346
2347 /*
2348 * wapbl_write_revocations(wl, offp)
2349 *
2350 * Write all pending deallocations in the current transaction from
2351 * wapbl_register_deallocation to the log on disk, adding to the
2352 * circular queue's head at byte offset *offp, and returning the
2353 * new head's byte offset in *offp.
2354 */
2355 static int
2356 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
2357 {
2358 struct wapbl_wc_blocklist *wc =
2359 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2360 int i;
2361 int blocklen = 1<<wl->wl_log_dev_bshift;
2362 int bph;
2363 off_t off = *offp;
2364 int error;
2365
2366 if (wl->wl_dealloccnt == 0)
2367 return 0;
2368
2369 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
2370 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
2371
2372 i = 0;
2373 while (i < wl->wl_dealloccnt) {
2374 wc->wc_type = WAPBL_WC_REVOCATIONS;
2375 wc->wc_len = blocklen;
2376 wc->wc_blkcount = 0;
2377 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
2378 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2379 wl->wl_deallocblks[i];
2380 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2381 wl->wl_dealloclens[i];
2382 wc->wc_blkcount++;
2383 i++;
2384 }
2385 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2386 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2387 wc->wc_len, (intmax_t)off));
2388 error = wapbl_circ_write(wl, wc, blocklen, &off);
2389 if (error)
2390 return error;
2391 }
2392 *offp = off;
2393 return 0;
2394 }
2395
2396 /*
2397 * wapbl_write_inodes(wl, offp)
2398 *
2399 * Write all pending inode allocations in the current transaction
2400 * from wapbl_register_inode to the log on disk, adding to the
2401 * circular queue's head at byte offset *offp and returning the
2402 * new head's byte offset in *offp.
2403 */
2404 static int
2405 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2406 {
2407 struct wapbl_wc_inodelist *wc =
2408 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2409 int i;
2410 int blocklen = 1 << wl->wl_log_dev_bshift;
2411 off_t off = *offp;
2412 int error;
2413
2414 struct wapbl_ino_head *wih;
2415 struct wapbl_ino *wi;
2416 int iph;
2417
2418 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2419 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2420
2421 i = 0;
2422 wih = &wl->wl_inohash[0];
2423 wi = 0;
2424 do {
2425 wc->wc_type = WAPBL_WC_INODES;
2426 wc->wc_len = blocklen;
2427 wc->wc_inocnt = 0;
2428 wc->wc_clear = (i == 0);
2429 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2430 while (!wi) {
2431 KASSERT((wih - &wl->wl_inohash[0])
2432 <= wl->wl_inohashmask);
2433 wi = LIST_FIRST(wih++);
2434 }
2435 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2436 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2437 wc->wc_inocnt++;
2438 i++;
2439 wi = LIST_NEXT(wi, wi_hash);
2440 }
2441 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2442 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2443 wc->wc_len, (intmax_t)off));
2444 error = wapbl_circ_write(wl, wc, blocklen, &off);
2445 if (error)
2446 return error;
2447 } while (i < wl->wl_inohashcnt);
2448
2449 *offp = off;
2450 return 0;
2451 }
2452
2453 #endif /* _KERNEL */
2454
2455 /****************************************************************/
2456
2457 struct wapbl_blk {
2458 LIST_ENTRY(wapbl_blk) wb_hash;
2459 daddr_t wb_blk;
2460 off_t wb_off; /* Offset of this block in the log */
2461 };
2462 #define WAPBL_BLKPOOL_MIN 83
2463
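/*
 * wapbl_blkhash_init(wr, size)
 *
 *	Initialize the replay block hash table of wr, sized for
 *	about size blocks.  The table records, for each physical
 *	block, the log offset of the newest copy seen so far.
 */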
2464 static void
2465 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2466 {
2467 if (size < WAPBL_BLKPOOL_MIN)
2468 size = WAPBL_BLKPOOL_MIN;
2469 KASSERT(wr->wr_blkhash == 0);
2470 #ifdef _KERNEL
2471 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2472 #else /* ! _KERNEL */
2473 /* Manually implement hashinit */
2474 {
2475 unsigned long i, hashsize;
2476 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2477 continue;
2478 wr->wr_blkhash = wapbl_alloc(hashsize * sizeof(*wr->wr_blkhash));
2479 for (i = 0; i < hashsize; i++)
2480 LIST_INIT(&wr->wr_blkhash[i]);
2481 wr->wr_blkhashmask = hashsize - 1;
2482 }
2483 #endif /* ! _KERNEL */
2484 }
2485
2486 static void
2487 wapbl_blkhash_free(struct wapbl_replay *wr)
2488 {
2489 KASSERT(wr->wr_blkhashcnt == 0);
2490 #ifdef _KERNEL
2491 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2492 #else /* ! _KERNEL */
2493 wapbl_free(wr->wr_blkhash,
2494 (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
2495 #endif /* ! _KERNEL */
2496 }
2497
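/*
 * wapbl_blkhash_get(wr, blk)
 *
 *	Return the replay entry for physical block blk, or NULL if
 *	the log holds no copy of it.
 */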
2498 static struct wapbl_blk *
2499 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2500 {
2501 struct wapbl_blk_head *wbh;
2502 struct wapbl_blk *wb;
2503 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2504 LIST_FOREACH(wb, wbh, wb_hash) {
2505 if (blk == wb->wb_blk)
2506 return wb;
2507 }
2508 return 0;
2509 }
2510
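/*
 * wapbl_blkhash_ins(wr, blk, off)
 *
 *	Record that the newest copy of physical block blk lives at
 *	byte offset off in the log, replacing any previously
 *	recorded offset.
 */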
2511 static void
2512 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2513 {
2514 struct wapbl_blk_head *wbh;
2515 struct wapbl_blk *wb;
2516 wb = wapbl_blkhash_get(wr, blk);
2517 if (wb) {
2518 KASSERT(wb->wb_blk == blk);
2519 wb->wb_off = off;
2520 } else {
2521 wb = wapbl_alloc(sizeof(*wb));
2522 wb->wb_blk = blk;
2523 wb->wb_off = off;
2524 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2525 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2526 wr->wr_blkhashcnt++;
2527 }
2528 }
2529
2530 static void
2531 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2532 {
2533 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2534 if (wb) {
2535 KASSERT(wr->wr_blkhashcnt > 0);
2536 wr->wr_blkhashcnt--;
2537 LIST_REMOVE(wb, wb_hash);
2538 wapbl_free(wb, sizeof(*wb));
2539 }
2540 }
2541
2542 static void
2543 wapbl_blkhash_clear(struct wapbl_replay *wr)
2544 {
2545 unsigned long i;
2546 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2547 struct wapbl_blk *wb;
2548
2549 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2550 KASSERT(wr->wr_blkhashcnt > 0);
2551 wr->wr_blkhashcnt--;
2552 LIST_REMOVE(wb, wb_hash);
2553 wapbl_free(wb, sizeof(*wb));
2554 }
2555 }
2556 KASSERT(wr->wr_blkhashcnt == 0);
2557 }
2558
2559 /****************************************************************/
2560
2561 /*
2562 * wapbl_circ_read(wr, data, len, offp)
2563 *
2564 * Read len bytes into data from the circular queue of wr,
2565 * starting at the linear byte offset *offp, and returning the new
2566 * linear byte offset in *offp.
2567 *
2568 * If the starting linear byte offset precedes wr->wr_circ_off,
2569 * the read instead begins at wr->wr_circ_off. XXX WTF? This
2570 * should be a KASSERT, not a conditional.
2571 */
2572 static int
2573 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2574 {
2575 size_t slen;
2576 off_t off = *offp;
2577 int error;
2578 daddr_t pbn;
2579
2580 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2581 wr->wr_log_dev_bshift) == len);
2582
2583 if (off < wr->wr_circ_off)
2584 off = wr->wr_circ_off;
2585 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2586 if (slen < len) {
2587 pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2588 #ifdef _KERNEL
2589 pbn = btodb(pbn << wr->wr_log_dev_bshift);
2590 #endif
2591 error = wapbl_read(data, slen, wr->wr_devvp, pbn);
2592 if (error)
2593 return error;
2594 data = (uint8_t *)data + slen;
2595 len -= slen;
2596 off = wr->wr_circ_off;
2597 }
2598 pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2599 #ifdef _KERNEL
2600 pbn = btodb(pbn << wr->wr_log_dev_bshift);
2601 #endif
2602 error = wapbl_read(data, len, wr->wr_devvp, pbn);
2603 if (error)
2604 return error;
2605 off += len;
2606 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2607 off = wr->wr_circ_off;
2608 *offp = off;
2609 return 0;
2610 }
2611
2612 /*
2613 * wapbl_circ_advance(wr, len, offp)
2614 *
2615 * Compute the linear byte offset of the circular queue of wr that
2616 * is len bytes past *offp, and store it in *offp.
2617 *
2618 * This is as if wapbl_circ_read, but without actually reading
2619 * anything.
2620 *
2621 * If the starting linear byte offset precedes wr->wr_circ_off, it
2622 * is taken to be wr->wr_circ_off instead. XXX WTF? This should
2623 * be a KASSERT, not a conditional.
2624 */
2625 static void
2626 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2627 {
2628 size_t slen;
2629 off_t off = *offp;
2630
2631 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2632 wr->wr_log_dev_bshift) == len);
2633
2634 if (off < wr->wr_circ_off)
2635 off = wr->wr_circ_off;
2636 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2637 if (slen < len) {
2638 len -= slen;
2639 off = wr->wr_circ_off;
2640 }
2641 off += len;
2642 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2643 off = wr->wr_circ_off;
2644 *offp = off;
2645 }
2646
2647 /****************************************************************/
2648
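/*
 * wapbl_replay_start(wrp, vp, off, count, blksize)
 *
 *	Begin replay of the log at disk address off of vp, covering
 *	count blocks of size blksize.  Read the two commit headers,
 *	pick the one with the newer generation, process the log
 *	records between its tail and head, and return the resulting
 *	replay state in *wrp.
 */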
2649 int
2650 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2651 daddr_t off, size_t count, size_t blksize)
2652 {
2653 struct wapbl_replay *wr;
2654 int error;
2655 struct vnode *devvp;
2656 daddr_t logpbn;
2657 uint8_t *scratch;
2658 struct wapbl_wc_header *wch;
2659 struct wapbl_wc_header *wch2;
2660 /* Use this until we read the actual log header */
2661 int log_dev_bshift = ilog2(blksize);
2662 size_t used;
2663 daddr_t pbn;
2664
2665 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2666 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2667 vp, off, count, blksize));
2668
2669 if (off < 0)
2670 return EINVAL;
2671
2672 if (blksize < DEV_BSIZE)
2673 return EINVAL;
2674 if (blksize % DEV_BSIZE)
2675 return EINVAL;
2676
2677 #ifdef _KERNEL
2678 #if 0
2679 /* XXX vp->v_size isn't reliably set for VBLK devices,
2680 * especially root. However, we might still want to verify
2681 * that the full load is readable */
2682 if ((off + count) * blksize > vp->v_size)
2683 return EINVAL;
2684 #endif
2685 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2686 return error;
2687 }
2688 #else /* ! _KERNEL */
2689 devvp = vp;
2690 logpbn = off;
2691 #endif /* ! _KERNEL */
2692
2693 scratch = wapbl_alloc(MAXBSIZE);
2694
2695 pbn = logpbn;
2696 #ifdef _KERNEL
2697 pbn = btodb(pbn << log_dev_bshift);
2698 #endif
2699 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, pbn);
2700 if (error)
2701 goto errout;
2702
2703 wch = (struct wapbl_wc_header *)scratch;
2704 wch2 =
2705 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2706 /* XXX verify checksums and magic numbers */
2707 if (wch->wc_type != WAPBL_WC_HEADER) {
2708 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2709 error = EFTYPE;
2710 goto errout;
2711 }
2712
2713 if (wch2->wc_generation > wch->wc_generation)
2714 wch = wch2;
2715
2716 wr = wapbl_calloc(1, sizeof(*wr));
2717
2718 wr->wr_logvp = vp;
2719 wr->wr_devvp = devvp;
2720 wr->wr_logpbn = logpbn;
2721
2722 wr->wr_scratch = scratch;
2723
2724 wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
2725 wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
2726 wr->wr_circ_off = wch->wc_circ_off;
2727 wr->wr_circ_size = wch->wc_circ_size;
2728 wr->wr_generation = wch->wc_generation;
2729
2730 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2731
2732 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2733 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2734 " len=%"PRId64" used=%zu\n",
2735 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2736 wch->wc_circ_size, used));
2737
2738 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2739
2740 error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
2741 if (error) {
2742 wapbl_replay_stop(wr);
2743 wapbl_replay_free(wr);
2744 return error;
2745 }
2746
2747 *wrp = wr;
2748 return 0;
2749
2750 errout:
2751 wapbl_free(scratch, MAXBSIZE);
2752 return error;
2753 }
2754
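/*
 * wapbl_replay_stop(wr)
 *
 *	Release the scratch buffer and the block hash of wr once
 *	replay is finished.  The remaining replay state stays valid
 *	until wapbl_replay_free().
 */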
2755 void
2756 wapbl_replay_stop(struct wapbl_replay *wr)
2757 {
2758
2759 if (!wapbl_replay_isopen(wr))
2760 return;
2761
2762 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2763
2764 wapbl_free(wr->wr_scratch, MAXBSIZE);
2765 wr->wr_scratch = NULL;
2766
2767 wr->wr_logvp = NULL;
2768
2769 wapbl_blkhash_clear(wr);
2770 wapbl_blkhash_free(wr);
2771 }
2772
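/*
 * wapbl_replay_free(wr)
 *
 *	Free the replay state wr, including any recorded inodes.
 *	The replay must already have been stopped.
 */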
2773 void
2774 wapbl_replay_free(struct wapbl_replay *wr)
2775 {
2776
2777 KDASSERT(!wapbl_replay_isopen(wr));
2778
2779 if (wr->wr_inodes)
2780 wapbl_free(wr->wr_inodes,
2781 wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
2782 wapbl_free(wr, sizeof(*wr));
2783 }
2784
2785 #ifdef _KERNEL
2786 int
2787 wapbl_replay_isopen1(struct wapbl_replay *wr)
2788 {
2789
2790 return wapbl_replay_isopen(wr);
2791 }
2792 #endif
2793
2794 /*
2795  * Calculate the disk address for the i'th block in the wc_blocks list,
2796  * offset by j blocks of size blen.
2797 *
2798 * wc_daddr is always a kernel disk address in DEV_BSIZE units that
2799 * was written to the journal.
2800 *
2801 * The kernel needs that address plus the offset in DEV_BSIZE units.
2802 *
2803 * Userland needs that address plus the offset in blen units.
2804 *
2805 */
2806 static daddr_t
2807 wapbl_block_daddr(struct wapbl_wc_blocklist *wc, int i, int j, int blen)
2808 {
2809 daddr_t pbn;
2810
2811 #ifdef _KERNEL
2812 pbn = wc->wc_blocks[i].wc_daddr + btodb(j * blen);
2813 #else
2814 pbn = dbtob(wc->wc_blocks[i].wc_daddr) / blen + j;
2815 #endif
2816
2817 return pbn;
2818 }
2819
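/*
 * wapbl_replay_process_blocks(wr, offp)
 *
 *	Process a WAPBL_WC_BLOCKS record in the scratch buffer:
 *	enter each physical block it describes into the block hash
 *	along with its log offset, advancing *offp past the block
 *	data.
 */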
2820 static void
2821 wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
2822 {
2823 struct wapbl_wc_blocklist *wc =
2824 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2825 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2826 int i, j, n;
2827
2828 for (i = 0; i < wc->wc_blkcount; i++) {
2829 /*
2830 * Enter each physical block into the hashtable independently.
2831 */
2832 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2833 for (j = 0; j < n; j++) {
2834 wapbl_blkhash_ins(wr, wapbl_block_daddr(wc, i, j, fsblklen),
2835 *offp);
2836 wapbl_circ_advance(wr, fsblklen, offp);
2837 }
2838 }
2839 }
2840
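/*
 * wapbl_replay_process_revocations(wr)
 *
 *	Process a WAPBL_WC_REVOCATIONS record in the scratch buffer:
 *	remove each physical block it describes from the block hash
 *	so that it will not be replayed.
 */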
2841 static void
2842 wapbl_replay_process_revocations(struct wapbl_replay *wr)
2843 {
2844 struct wapbl_wc_blocklist *wc =
2845 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2846 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2847 int i, j, n;
2848
2849 for (i = 0; i < wc->wc_blkcount; i++) {
2850 /*
2851 * Remove any blocks found from the hashtable.
2852 */
2853 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2854 for (j = 0; j < n; j++)
2855 wapbl_blkhash_rem(wr, wapbl_block_daddr(wc, i, j, fsblklen));
2856 }
2857 }
2858
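/*
 * wapbl_replay_process_inodes(wr, oldoff, newoff)
 *
 *	Process a WAPBL_WC_INODES record in the scratch buffer:
 *	remember the inodes it lists, discarding the current set
 *	first if the record has wc_clear set, and note the log
 *	offsets spanned by the inode records.
 */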
2859 static void
2860 wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
2861 {
2862 struct wapbl_wc_inodelist *wc =
2863 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2864 void *new_inodes;
2865 const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
2866
2867 KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
2868
2869 /*
2870 	 * Keep track of where we found this so the location won't be
2871 	 * overwritten.
2872 */
2873 if (wc->wc_clear) {
2874 wr->wr_inodestail = oldoff;
2875 wr->wr_inodescnt = 0;
2876 if (wr->wr_inodes != NULL) {
2877 wapbl_free(wr->wr_inodes, oldsize);
2878 wr->wr_inodes = NULL;
2879 }
2880 }
2881 wr->wr_inodeshead = newoff;
2882 if (wc->wc_inocnt == 0)
2883 return;
2884
2885 new_inodes = wapbl_alloc((wr->wr_inodescnt + wc->wc_inocnt) *
2886 sizeof(wr->wr_inodes[0]));
2887 if (wr->wr_inodes != NULL) {
2888 memcpy(new_inodes, wr->wr_inodes, oldsize);
2889 wapbl_free(wr->wr_inodes, oldsize);
2890 }
2891 wr->wr_inodes = new_inodes;
2892 memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
2893 wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
2894 wr->wr_inodescnt += wc->wc_inocnt;
2895 }
2896
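/*
 * wapbl_replay_process(wr, head, tail)
 *
 *	Scan the log of wr from tail to head, dispatching each
 *	record to the appropriate handler, and leave the block hash
 *	describing the blocks that still need to be replayed.  On
 *	error the block hash is cleared.
 */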
2897 static int
2898 wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
2899 {
2900 off_t off;
2901 int error;
2902
2903 int logblklen = 1 << wr->wr_log_dev_bshift;
2904
2905 wapbl_blkhash_clear(wr);
2906
2907 off = tail;
2908 while (off != head) {
2909 struct wapbl_wc_null *wcn;
2910 off_t saveoff = off;
2911 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2912 if (error)
2913 goto errout;
2914 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2915 switch (wcn->wc_type) {
2916 case WAPBL_WC_BLOCKS:
2917 wapbl_replay_process_blocks(wr, &off);
2918 break;
2919
2920 case WAPBL_WC_REVOCATIONS:
2921 wapbl_replay_process_revocations(wr);
2922 break;
2923
2924 case WAPBL_WC_INODES:
2925 wapbl_replay_process_inodes(wr, saveoff, off);
2926 break;
2927
2928 default:
2929 printf("Unrecognized wapbl type: 0x%08x\n",
2930 wcn->wc_type);
2931 error = EFTYPE;
2932 goto errout;
2933 }
2934 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2935 if (off != saveoff) {
2936 printf("wapbl_replay: corrupted records\n");
2937 error = EFTYPE;
2938 goto errout;
2939 }
2940 }
2941 return 0;
2942
2943 errout:
2944 wapbl_blkhash_clear(wr);
2945 return error;
2946 }
2947
2948 #if 0
2949 int
2950 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2951 {
2952 off_t off;
2953 int mismatchcnt = 0;
2954 int logblklen = 1 << wr->wr_log_dev_bshift;
2955 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2956 void *scratch1 = wapbl_alloc(MAXBSIZE);
2957 void *scratch2 = wapbl_alloc(MAXBSIZE);
2958 int error = 0;
2959
2960 KDASSERT(wapbl_replay_isopen(wr));
2961
2962 off = wch->wc_tail;
2963 while (off != wch->wc_head) {
2964 struct wapbl_wc_null *wcn;
2965 #ifdef DEBUG
2966 off_t saveoff = off;
2967 #endif
2968 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2969 if (error)
2970 goto out;
2971 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2972 switch (wcn->wc_type) {
2973 case WAPBL_WC_BLOCKS:
2974 {
2975 struct wapbl_wc_blocklist *wc =
2976 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2977 int i;
2978 for (i = 0; i < wc->wc_blkcount; i++) {
2979 int foundcnt = 0;
2980 int dirtycnt = 0;
2981 int j, n;
2982 /*
2983 				 * Check each physical block against the
2984 				 * hashtable independently.
2985 */
2986 n = wc->wc_blocks[i].wc_dlen >>
2987 wch->wc_fs_dev_bshift;
2988 for (j = 0; j < n; j++) {
2989 struct wapbl_blk *wb =
2990 wapbl_blkhash_get(wr,
2991 wapbl_block_daddr(wc, i, j, fsblklen));
2992 if (wb && (wb->wb_off == off)) {
2993 foundcnt++;
2994 error =
2995 wapbl_circ_read(wr,
2996 scratch1, fsblklen,
2997 &off);
2998 if (error)
2999 goto out;
3000 error =
3001 wapbl_read(scratch2,
3002 fsblklen, fsdevvp,
3003 wb->wb_blk);
3004 if (error)
3005 goto out;
3006 if (memcmp(scratch1,
3007 scratch2,
3008 fsblklen)) {
3009 printf(
3010 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
3011 wb->wb_blk, (intmax_t)off);
3012 dirtycnt++;
3013 mismatchcnt++;
3014 }
3015 } else {
3016 wapbl_circ_advance(wr,
3017 fsblklen, &off);
3018 }
3019 }
3020 #if 0
3021 /*
3022 * If all of the blocks in an entry
3023 * are clean, then remove all of its
3024 * blocks from the hashtable since they
3025 * never will need replay.
3026 */
3027 if ((foundcnt != 0) &&
3028 (dirtycnt == 0)) {
3029 off = saveoff;
3030 wapbl_circ_advance(wr,
3031 logblklen, &off);
3032 for (j = 0; j < n; j++) {
3033 struct wapbl_blk *wb =
3034 wapbl_blkhash_get(wr,
3035 wapbl_block_daddr(wc, i, j, fsblklen));
3036 if (wb &&
3037 (wb->wb_off == off)) {
3038 wapbl_blkhash_rem(wr, wb->wb_blk);
3039 }
3040 wapbl_circ_advance(wr,
3041 fsblklen, &off);
3042 }
3043 }
3044 #endif
3045 }
3046 }
3047 break;
3048 case WAPBL_WC_REVOCATIONS:
3049 case WAPBL_WC_INODES:
3050 break;
3051 default:
3052 KASSERT(0);
3053 }
3054 #ifdef DEBUG
3055 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3056 KASSERT(off == saveoff);
3057 #endif
3058 }
3059 out:
3060 wapbl_free(scratch1, MAXBSIZE);
3061 wapbl_free(scratch2, MAXBSIZE);
3062 if (!error && mismatchcnt)
3063 error = EFTYPE;
3064 return error;
3065 }
3066 #endif
3067
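/*
 * wapbl_replay_write(wr, fsdevvp)
 *
 *	Write every block recorded in the block hash of wr from its
 *	copy in the log to its home location on fsdevvp, completing
 *	the replay.
 */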
3068 int
3069 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
3070 {
3071 struct wapbl_blk *wb;
3072 size_t i;
3073 off_t off;
3074 void *scratch;
3075 int error = 0;
3076 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3077
3078 KDASSERT(wapbl_replay_isopen(wr));
3079
3080 scratch = wapbl_alloc(MAXBSIZE);
3081
3082 for (i = 0; i <= wr->wr_blkhashmask; ++i) {
3083 LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
3084 off = wb->wb_off;
3085 error = wapbl_circ_read(wr, scratch, fsblklen, &off);
3086 if (error)
3087 break;
3088 error = wapbl_write(scratch, fsblklen, fsdevvp,
3089 wb->wb_blk);
3090 if (error)
3091 break;
3092 }
3093 }
3094
3095 wapbl_free(scratch, MAXBSIZE);
3096 return error;
3097 }
3098
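/*
 * wapbl_replay_can_read(wr, blk, len)
 *
 *	Return nonzero if the log of wr holds a copy of any block in
 *	the range of len bytes starting at physical block blk.
 */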
3099 int
3100 wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
3101 {
3102 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3103
3104 KDASSERT(wapbl_replay_isopen(wr));
3105 KASSERT((len % fsblklen) == 0);
3106
3107 while (len != 0) {
3108 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3109 if (wb)
3110 return 1;
3111 		len -= fsblklen;
		blk++;
3112 }
3113 return 0;
3114 }
3115
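/*
 * wapbl_replay_read(wr, data, blk, len)
 *
 *	Copy len bytes starting at physical block blk into data,
 *	taking each block from the log if a newer copy is recorded
 *	there and otherwise leaving the caller's copy of that block
 *	untouched.
 */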
3116 int
3117 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
3118 {
3119 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3120
3121 KDASSERT(wapbl_replay_isopen(wr));
3122
3123 KASSERT((len % fsblklen) == 0);
3124
3125 while (len != 0) {
3126 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3127 if (wb) {
3128 off_t off = wb->wb_off;
3129 int error;
3130 error = wapbl_circ_read(wr, data, fsblklen, &off);
3131 if (error)
3132 return error;
3133 }
3134 data = (uint8_t *)data + fsblklen;
3135 len -= fsblklen;
3136 blk++;
3137 }
3138 return 0;
3139 }
3140
3141 #ifdef _KERNEL
3142
3143 MODULE(MODULE_CLASS_VFS, wapbl, NULL);
3144
3145 static int
3146 wapbl_modcmd(modcmd_t cmd, void *arg)
3147 {
3148
3149 switch (cmd) {
3150 case MODULE_CMD_INIT:
3151 wapbl_init();
3152 return 0;
3153 case MODULE_CMD_FINI:
3154 return wapbl_fini(true);
3155 default:
3156 return ENOTTY;
3157 }
3158 }
3159 #endif /* _KERNEL */
3160