/*	$NetBSD: vfs_wapbl.c,v 1.18 2009/01/31 09:33:36 yamt Exp $	*/

/*-
 * Copyright (c) 2003,2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements file system independent write ahead logging.
 */

#define WAPBL_INTERNAL

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.18 2009/01/31 09:33:36 yamt Exp $");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/wapbl.h>
#include <sys/wapbl_replay.h>

#include <miscfs/specfs/specdev.h>

#define	wapbl_malloc(s) kmem_alloc((s), KM_SLEEP)
#define	wapbl_free(a, s) kmem_free((a), (s))
#define	wapbl_calloc(n, s) kmem_zalloc((n)*(s), KM_SLEEP)

#else /* !_KERNEL */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <sys/time.h>
#include <sys/wapbl.h>
#include <sys/wapbl_replay.h>

#define	KDASSERT(x) assert(x)
#define	KASSERT(x) assert(x)
#define	wapbl_malloc(s) malloc(s)
#define	wapbl_free(a, s) free(a)
#define	wapbl_calloc(n, s) calloc((n), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
struct wapbl {
	struct vnode *wl_logvp;	/* r: log here */
	struct vnode *wl_devvp;	/* r: log on this device */
	struct mount *wl_mount;	/* r: mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r: Physical block number of start of log */
	int wl_log_dev_bshift;	/* r: logarithm of device block size of log
				      device */
	int wl_fs_dev_bshift;	/* r: logarithm of device block size of
				      filesystem device */

	unsigned wl_lock_count;	/* m: Count of transactions in progress */

	size_t wl_circ_size; 	/* r: Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r: Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r: Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r: Number of buf bytes reserved for log */

	off_t wl_head;		/* l: Byte offset of log head */
	off_t wl_tail;		/* l: Byte offset of log tail */
	/*
	 * head == tail == 0 means log is empty.
	 * head == tail != 0 means log is full.
	 * See the assertions in wapbl_advance() for other boundary conditions.
	 * Only truncate moves the tail, except when flush sets it to
	 * wl_header_size.  Only flush moves the head, except when truncate
	 * sets it to 0.
	 */

	struct wapbl_wc_header *wl_wc_header;	/* l */
	void *wl_wc_scratch;	/* l: scratch space (XXX: why?!?) */

	kmutex_t wl_mtx;	/* u: short-term lock */
	krwlock_t wl_rwlock;	/* u: File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
#ifdef _KERNEL
	wapbl_flush_fn_t wl_flush;	/* r */
	wapbl_flush_fn_t wl_flush_abort;/* r */
#endif

	size_t wl_bufbytes;	/* m: Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m: Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m: Total bcount of wl_bufs */

	LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes;	/* m: Amount of space available for
					      reclamation by truncate */
	int wl_error_count;	/* m: # of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
#endif

	daddr_t *wl_deallocblks;/* l: address of block */
	int *wl_dealloclens;	/* l: size of block (fragments, remember) */
	int wl_dealloccnt;	/* l: total count */
	int wl_dealloclim;	/* l: max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
						   accounting */
};

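/*
 * On-disk layout, as implied by wl_circ_off/wl_circ_size and by
 * wapbl_write_commit() below (editor's sketch, not normative):
 *
 *	wl_logpbn + 0:	commit header 0 (written when wc_generation is even)
 *	wl_logpbn + 1:	commit header 1 (written when wc_generation is odd)
 *	[wl_circ_off, wl_circ_off + wl_circ_size):
 *			circular area holding the block, revocation and
 *			inode-list records between wl_tail and wl_head
 */
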
#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
#endif /* _KERNEL */

static int wapbl_replay_process(struct wapbl_replay *wr, off_t, off_t);

static __inline size_t wapbl_space_free(size_t avail, off_t head,
	off_t tail);
static __inline size_t wapbl_space_used(size_t avail, off_t head,
	off_t tail);

#ifdef _KERNEL

#define	WAPBL_INODETRK_SIZE 83
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

#if 0
int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
#endif

static int wapbl_replay_isopen1(struct wapbl_replay *);

/*
 * This is useful for debugging.  If set, the log will
 * only be truncated when necessary.
 */
int wapbl_lazy_truncate = 0;

struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_can_read = wapbl_replay_can_read,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert = wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};

void
wapbl_init(void)
{

	/* nothing */
}

static int
wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
{
	int error, i;

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));

	/*
	 * It's only valid to reuse the replay log if it's
	 * the same as the new log we just opened.
	 */
	KDASSERT(!wapbl_replay_isopen(wr));
	KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
	KASSERT(wl->wl_logpbn == wr->wr_logpbn);
	KASSERT(wl->wl_circ_size == wr->wr_circ_size);
	KASSERT(wl->wl_circ_off == wr->wr_circ_off);
	KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
	KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);

	wl->wl_wc_header->wc_generation = wr->wr_generation + 1;

	for (i = 0; i < wr->wr_inodescnt; i++)
		wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
		    wr->wr_inodes[i].wr_imode);

	/* Make sure new transaction won't overwrite old inodes list */
	KDASSERT(wapbl_transaction_len(wl) <=
	    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
	    wr->wr_inodestail));

	wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
	wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
	    wapbl_transaction_len(wl);

	error = wapbl_write_inodes(wl, &wl->wl_head);
	if (error)
		return error;

	KASSERT(wl->wl_head != wl->wl_tail);
	KASSERT(wl->wl_head != 0);

	return 0;
}

int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = DEV_BSHIFT;
	int fs_dev_bshift = DEV_BSHIFT;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
		    ("wapbl: log device's block size cannot be larger "
		     "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return ENOSPC;
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	LIST_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of log_dev_bshift */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;

	/*
	 * wl_bufbytes_max limits the size of the in memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
	 *   it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
	 * Therefore it must be a multiple of the least common multiple of
	 * those three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */
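
	/*
	 * Worked example of the rounding below (editor's note; the shift
	 * values are illustrative assumptions, not taken from this file):
	 * with PAGE_SHIFT = 12 and wl_log_dev_bshift = wl_fs_dev_bshift = 9,
	 * the three constraints are multiples of 4096, 512 and 512, whose
	 * least common multiple is 4096, so the paired shifts below simply
	 * round wl_bufbytes_max down to a multiple of 4096.
	 */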

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (nbuf / 2) * 1024;

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);

	wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
	    wl->wl_dealloclim);
	wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
	    wl->wl_dealloclim);

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1 << wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_malloc(len);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		error = wapbl_start_flush_inodes(wl, wr);
		if (error)
			goto errout;
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_deallocblks,
	    sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim);
	wapbl_free(wl->wl_dealloclens,
	    sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim);
	wapbl_inodetrk_free(wl);
	wapbl_free(wl, sizeof(*wl));

	return error;
}

/*
 * Like wapbl_flush, but discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

#ifdef WAPBL_DEBUG_PRINT
	{
		struct wapbl_entry *we;
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we, sizeof(*we));
		}
	}

	/* Discard list of deallocs */
	wl->wl_dealloccnt = 0;
	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	struct vnode *vp;
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return EBUSY;
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	vp = wl->wl_logvp;

	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_deallocblks,
	    sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim);
	wapbl_free(wl->wl_dealloclens,
	    sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim);
	wapbl_inodetrk_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl, sizeof(*wl));

	return 0;
}

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
	struct buf *bp;
	int error;

	KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
	KASSERT(devvp->v_type == VBLK);

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(&devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(&devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY; /* silly & dubious */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%x failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}

/*
 * Off is a byte offset into the circular log; on return, *offp holds
 * the offset for the next write.  Handles log wraparound.
 */
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;

	KDASSERT(((len >> wl->wl_log_dev_bshift) <<
	    wl->wl_log_dev_bshift) == len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		error = wapbl_write(data, slen, wl->wl_devvp,
		    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	error = wapbl_write(data, len, wl->wl_devvp,
	    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}
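
/*
 * Wraparound example for wapbl_circ_write() (editor's illustration,
 * with made-up values): if wl_circ_off = 1024 and wl_circ_size = 8192,
 * the data area is [1024, 9216).  A 2048-byte write at *offp = 8704 is
 * split: slen = 1024 + 8192 - 8704 = 512 bytes are written at offset
 * 8704, the remaining 1536 bytes are written at offset 1024, and *offp
 * becomes 1024 + 1536 = 2560.
 */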

/****************************************************************/

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;
	krw_t op;

	KDASSERT(wl);

	/*
	 * XXX: The original code calls for the use of a RW_READER lock
	 * here, but it turns out there are performance issues with high
	 * metadata-rate workloads (e.g. multiple simultaneous tar
	 * extractions).  For now, we force the lock to be RW_WRITER,
	 * since that currently has the best performance characteristics
	 * (even for a single tar-file extraction).
	 */
#define WAPBL_DEBUG_SERIALIZE 1

#ifdef WAPBL_DEBUG_SERIALIZE
	op = RW_WRITER;
#else
	op = RW_READER;
#endif

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
	    wl->wl_bufbytes_max / 2) ||
	    ((wl->wl_bufcount + (lockcount * 10)) >
	    wl->wl_bufcount_max / 2) ||
	    (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
	mutex_exit(&wl->wl_mtx);

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, op);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}

void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		LIST_REMOVE(bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked but dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	    ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	LIST_REMOVE(bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */

/****************************************************************/
/* Some utility inlines */

/*
 * This is used to advance the pointer old to the new value old + delta,
 * wrapping within the circular log.
 */
static __inline off_t
wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
{
	off_t new;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= size);
	KASSERT((old == 0) || (old >= off));
	KASSERT(old < (size + off));

	if ((old == 0) && (delta != 0))
		new = off + delta;
	else if ((old + delta) < (size + off))
		new = old + delta;
	else
		new = (old + delta) - size;

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (new == old));
	KASSERT((delta == 0) || (new != 0));
	KASSERT((delta != (size)) || (new == old));

	/* Define acceptable ranges for output. */
	KASSERT((new == 0) || (new >= off));
	KASSERT(new < (size + off));
	return new;
}
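
/*
 * Example for wapbl_advance() (editor's illustration, made-up values):
 * with size = 8192 and off = 1024 the valid region is [1024, 9216).
 * Advancing old = 8704 by delta = 1024 gives 9728 >= 9216, so the
 * result wraps to 9728 - 8192 = 1536.  Advancing from the empty state
 * (old == 0) by a nonzero delta starts at off + delta.
 */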

static __inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail == 0) {
		KASSERT(head == 0);
		return 0;
	}
	return ((head + (avail - 1) - tail) % avail) + 1;
}
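
/*
 * Formula check for wapbl_space_used() (editor's illustration, made-up
 * values): with avail = 8192, head = 2048, tail = 1024 the log holds
 * ((2048 + 8191 - 1024) % 8192) + 1 = 1024 bytes; with the head
 * wrapped past the tail (head = 1024, tail = 2048) it holds
 * ((1024 + 8191 - 2048) % 8192) + 1 = 7168 bytes.
 */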

static __inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{

	return avail - wapbl_space_used(avail, head, tail);
}

static __inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_free(size, head, tail));
	head = wapbl_advance(size, off, head, delta);
	if ((tail == 0) && (head != 0))
		tail = off;
	*headp = head;
	*tailp = tail;
}

static __inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_used(size, head, tail));
	tail = wapbl_advance(size, off, tail, delta);
	if (head == tail) {
		head = tail = 0;
	}
	*headp = head;
	*tailp = tail;
}

#ifdef _KERNEL

/****************************************************************/

/*
 * Remove transactions whose buffers are completely flushed to disk.
 * Will block until at least minfree space is available.
 * Only intended to be called from inside wapbl_flush and therefore
 * does not protect against commit races with itself or with flush.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
	    wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	if (waitonly)
		return 0;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush.  This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}

/****************************************************************/

void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;

	/*
	 * Handle possible flushing of buffers after log has been
	 * decommissioned.
	 */
	if (!wl) {
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
		we->we_unsynced_bufbytes -= bp->b_bufsize;
#endif

		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we, sizeof(*we));
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_flags & B_DONE);
	KDASSERT(!(bp->b_flags & B_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_flags & B_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_flags & B_INVAL));
	KDASSERT(!(bp->b_flags & B_NOCACHE));
#endif

	if (bp->b_error) {
#ifdef notyet /* Can't currently handle possible dirty buffer reuse */
		XXXpooka: interfaces not fully updated
		Note: this was not enabled in the original patch
		against netbsd4 either.  I don't know if comment
		above is true or not.

		/*
		 * If an error occurs, report the error and leave the
		 * buffer as a delayed write on the LRU queue.
		 * restarting the write would likely result in
		 * an error spinloop, so let it be done harmlessly
		 * by the syncer.
		 */
		bp->b_flags &= ~(B_DONE);
		simple_unlock(&bp->b_interlock);

		if (we->we_error == 0) {
			mutex_enter(&wl->wl_mtx);
			wl->wl_error_count++;
			mutex_exit(&wl->wl_mtx);
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		we->we_error = bp->b_error;
		bp->b_error = 0;
		brelse(bp);
		return;
#else
		/* For now, just mark the log permanently errored out */

		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
#endif
	}

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
	we->we_unsynced_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
	wl->wl_unsynced_bufbytes -= bp->b_bufsize;
#endif

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions.  If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has been successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		    (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			wapbl_free(we, sizeof(*we));
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
	brelse(bp, 0);
}

/*
 * Write transactions to disk + start I/O for contents
 */
int
wapbl_flush(struct wapbl *wl, int waitfor)
{
	struct buf *bp;
	struct wapbl_entry *we;
	off_t off;
	off_t head;
	off_t tail;
	size_t delta = 0;
	size_t flushsize;
	size_t reserved;
	int error = 0;

	/*
	 * Do a quick check to see if a full flush can be skipped.
	 * This assumes that the flush callback does not need to be called
	 * unless there are other outstanding bufs.
	 */
	if (!waitfor) {
		size_t nbufs;
		mutex_enter(&wl->wl_mtx);	/* XXX need mutex here to
						   protect the KASSERTS */
		nbufs = wl->wl_bufcount;
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
		mutex_exit(&wl->wl_mtx);
		if (nbufs == 0)
			return 0;
	}

	/*
	 * XXX we may consider using LK_UPGRADE here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

	/*
	 * Now that we are fully locked and flushed,
	 * do another check for nothing to do.
	 */
	if (wl->wl_bufcount == 0) {
		goto out;
	}

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d flushing entries with "
	    "bufcount=%zu bufbytes=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes));
#endif

	/* Calculate amount of space needed to flush */
	flushsize = wapbl_transaction_len(wl);

	if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
		/*
		 * XXX this could be handled more gracefully, perhaps place
		 * only a partial transaction in the log and allow the
		 * remaining to flush without the protection of the journal.
		 */
		panic("wapbl_flush: current transaction too big to flush\n");
	}

	error = wapbl_truncate(wl, flushsize, 0);
	if (error)
		goto out2;

	off = wl->wl_head;
	KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
	    (off < wl->wl_circ_off + wl->wl_circ_size)));
	error = wapbl_write_blocks(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_revocations(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_inodes(wl, &off);
	if (error)
		goto out2;

	reserved = 0;
	if (wl->wl_inohashcnt)
		reserved = wapbl_transaction_inodes_len(wl);

	head = wl->wl_head;
	tail = wl->wl_tail;

	wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
	    &head, &tail);
#ifdef WAPBL_DEBUG
	if (head != off) {
		panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
		      " off=%"PRIdMAX" flush=%zu\n",
		      (intmax_t)head, (intmax_t)tail, (intmax_t)off,
		      flushsize);
	}
#else
	KASSERT(head == off);
#endif

	/* Opportunistically move the tail forward if we can */
	if (!wapbl_lazy_truncate) {
		mutex_enter(&wl->wl_mtx);
		delta = wl->wl_reclaimable_bytes;
		mutex_exit(&wl->wl_mtx);
		wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
		    &head, &tail);
	}

	error = wapbl_write_commit(wl, head, tail);
	if (error)
		goto out2;

	we = wapbl_calloc(1, sizeof(*we));

#ifdef WAPBL_DEBUG_BUFBYTES
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
	    " unsynced=%zu"
	    "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
	    "inodes=%d\n",
	    curproc->p_pid, curlwp->l_lid, flushsize, delta,
	    wapbl_space_used(wl->wl_circ_size, head, tail),
	    wl->wl_unsynced_bufbytes, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
	    wl->wl_inohashcnt));
#else
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
	    "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
	    "inodes=%d\n",
	    curproc->p_pid, curlwp->l_lid, flushsize, delta,
	    wapbl_space_used(wl->wl_circ_size, head, tail),
	    wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
	    wl->wl_dealloccnt, wl->wl_inohashcnt));
#endif

	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);

	wl->wl_reserved_bytes = reserved;
	wl->wl_head = head;
	wl->wl_tail = tail;
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	wl->wl_dealloccnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
	wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
#endif

	we->we_wapbl = wl;
	we->we_bufcount = wl->wl_bufcount;
#ifdef WAPBL_DEBUG_BUFBYTES
	we->we_unsynced_bufbytes = wl->wl_bufbytes;
#endif
	we->we_reclaimable_bytes = flushsize;
	we->we_error = 0;
	SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);

	/*
	 * This flushes bufs in the reverse order from which they were
	 * queued.  It shouldn't matter, but if we cared we could use a
	 * TAILQ instead.  XXX Note they will get put on the lru queue
	 * when they flush, so we might actually want to change this to
	 * preserve order.
	 */
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
			continue;
		}
		bp->b_iodone = wapbl_biodone;
		bp->b_private = we;
		bremfree(bp);
		wapbl_remove_buf_locked(wl, bp);
		mutex_exit(&wl->wl_mtx);
		mutex_exit(&bufcache_lock);
		bawrite(bp);
		mutex_enter(&bufcache_lock);
		mutex_enter(&wl->wl_mtx);
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d done flushing entries...\n",
	    curproc->p_pid, curlwp->l_lid));
#endif

 out:

	/*
	 * If the waitfor flag is set, don't return until everything is
	 * fully flushed and the on disk log is empty.
	 */
	if (waitfor) {
		error = wapbl_truncate(wl, wl->wl_circ_size -
		    wl->wl_reserved_bytes, wapbl_lazy_truncate);
	}

 out2:
	if (error) {
		wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
		    wl->wl_dealloclens, wl->wl_dealloccnt);
	}

#ifdef WAPBL_DEBUG_PRINT
	if (error) {
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
		mutex_enter(&wl->wl_mtx);
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n", we->we_bufcount,
			    we->we_reclaimable_bytes, we->we_error));
		}
#endif
		mutex_exit(&wl->wl_mtx);
	}
#endif

	rw_exit(&wl->wl_rwlock);
	return error;
}

/****************************************************************/

void
wapbl_jlock_assert(struct wapbl *wl)
{

#ifdef WAPBL_DEBUG_SERIALIZE
	KASSERT(rw_write_held(&wl->wl_rwlock));
#else
	KASSERT(rw_read_held(&wl->wl_rwlock) || rw_write_held(&wl->wl_rwlock));
#endif
}

void
wapbl_junlock_assert(struct wapbl *wl)
{

#ifdef WAPBL_DEBUG_SERIALIZE
	KASSERT(!rw_write_held(&wl->wl_rwlock));
#endif
}

/****************************************************************/

/* locks missing */
void
wapbl_print(struct wapbl *wl,
	    int full,
	    void (*pr)(const char *, ...))
{
	struct buf *bp;
	struct wapbl_entry *we;
	(*pr)("wapbl %p", wl);
	(*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
	    wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
	(*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
	    wl->wl_circ_size, wl->wl_circ_off,
	    (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
	(*pr)("log_dev_bshift = %d, fs_dev_bshift = %d\n",
	    wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
#ifdef WAPBL_DEBUG_BUFBYTES
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	    "reserved = %zu errcnt = %d unsynced = %zu\n",
	    wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
	    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	    wl->wl_error_count, wl->wl_unsynced_bufbytes);
#else
	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
	    "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
	    wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
	    wl->wl_error_count);
#endif
	(*pr)("\tdealloccnt = %d, dealloclim = %d\n",
	    wl->wl_dealloccnt, wl->wl_dealloclim);
	(*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
	    wl->wl_inohashcnt, wl->wl_inohashmask);
	(*pr)("entries:\n");
	SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
#ifdef WAPBL_DEBUG_BUFBYTES
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
		    "unsynced = %zu\n",
		    we->we_bufcount, we->we_reclaimable_bytes,
		    we->we_error, we->we_unsynced_bufbytes);
#else
		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
		    we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
#endif
	}
	if (full) {
		int cnt = 0;
		(*pr)("bufs =");
		LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
			if (!LIST_NEXT(bp, b_wapbllist)) {
				(*pr)(" %p", bp);
			} else if ((++cnt % 6) == 0) {
				(*pr)(" %p,\n\t", bp);
			} else {
				(*pr)(" %p,", bp);
			}
		}
		(*pr)("\n");

		(*pr)("dealloced blks = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i < wl->wl_dealloccnt; i++) {
				(*pr)(" %"PRId64":%d,",
				    wl->wl_deallocblks[i],
				    wl->wl_dealloclens[i]);
				if ((++cnt % 4) == 0) {
					(*pr)("\n\t");
				}
			}
		}
		(*pr)("\n");

		(*pr)("registered inodes = ");
		{
			int i;
			cnt = 0;
			for (i = 0; i <= wl->wl_inohashmask; i++) {
				struct wapbl_ino_head *wih;
				struct wapbl_ino *wi;

				wih = &wl->wl_inohash[i];
				LIST_FOREACH(wi, wih, wi_hash) {
					if (wi->wi_ino == 0)
						continue;
					(*pr)(" %"PRId32"/0%06"PRIo32",",
					    wi->wi_ino, wi->wi_mode);
					if ((++cnt % 4) == 0) {
						(*pr)("\n\t");
					}
				}
			}
			(*pr)("\n");
		}
	}
}

#if defined(WAPBL_DEBUG) || defined(DDB)
void
wapbl_dump(struct wapbl *wl)
{
#if defined(WAPBL_DEBUG)
	if (!wl)
		wl = wapbl_debug_wl;
#endif
	if (!wl)
		return;
	wapbl_print(wl, 1, printf);
}
#endif

/****************************************************************/

void
wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
{

	wapbl_jlock_assert(wl);

	/* XXX should eventually instead tie this into resource estimation */
	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
	wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
	wl->wl_dealloclens[wl->wl_dealloccnt] = len;
	wl->wl_dealloccnt++;
	WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
	    ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
}

/****************************************************************/

static void
wapbl_inodetrk_init(struct wapbl *wl, u_int size)
{

	wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
	if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
		pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
		    "wapblinopl", &pool_allocator_nointr, IPL_NONE);
	}
}

static void
wapbl_inodetrk_free(struct wapbl *wl)
{

	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_inohashcnt == 0);
	hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
	if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
		pool_destroy(&wapbl_ino_pool);
	}
}

static struct wapbl_ino *
wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	KASSERT(mutex_owned(&wl->wl_mtx));

	wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
	LIST_FOREACH(wi, wih, wi_hash) {
		if (ino == wi->wi_ino)
			return wi;
	}
	return 0;
}

void
wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	wi = pool_get(&wapbl_ino_pool, PR_WAITOK);

	mutex_enter(&wl->wl_mtx);
	if (wapbl_inodetrk_get(wl, ino) == NULL) {
		wi->wi_ino = ino;
		wi->wi_mode = mode;
		wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
		LIST_INSERT_HEAD(wih, wi, wi_hash);
		wl->wl_inohashcnt++;
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_register_inode: ino=%"PRId64"\n", ino));
		mutex_exit(&wl->wl_mtx);
	} else {
		mutex_exit(&wl->wl_mtx);
		pool_put(&wapbl_ino_pool, wi);
	}
}

void
wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino *wi;

	mutex_enter(&wl->wl_mtx);
	wi = wapbl_inodetrk_get(wl, ino);
	if (wi) {
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
		KASSERT(wl->wl_inohashcnt > 0);
		wl->wl_inohashcnt--;
		LIST_REMOVE(wi, wi_hash);
		mutex_exit(&wl->wl_mtx);

		pool_put(&wapbl_ino_pool, wi);
	} else {
		mutex_exit(&wl->wl_mtx);
	}
}

/****************************************************************/

static __inline size_t
wapbl_transaction_inodes_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int iph;

	/* Calculate number of inodes described in an inodelist header */
	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);

	KASSERT(iph > 0);

	return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
}

/* Calculate amount of space a transaction will take on disk */
static size_t
wapbl_transaction_len(struct wapbl *wl)
{
	int blocklen = 1<<wl->wl_log_dev_bshift;
	size_t len;
	int bph;

	/* Calculate number of blocks described in a blocklist header */
	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	KASSERT(bph > 0);

	len = wl->wl_bcount;
	len += howmany(wl->wl_bufcount, bph)*blocklen;
	len += howmany(wl->wl_dealloccnt, bph)*blocklen;
	len += wapbl_transaction_inodes_len(wl);

	return len;
}
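
/*
 * Sizing example for wapbl_transaction_len() (editor's illustration;
 * the header and entry sizes are assumed values, not taken from
 * sys/wapbl.h): with a 512-byte log block, a 32-byte blocklist header
 * and 12-byte wc_blocks[] entries, bph = (512 - 32) / 12 = 40, so a
 * transaction holding 100 buffers needs howmany(100, 40) = 3 header
 * blocks plus the buffer data itself (wl_bcount), plus similar
 * overhead for the revocation and inode lists.
 */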

/*
 * Perform commit operation
 *
 * Note that the generation number increment needs to
 * be protected against racing with other invocations
 * of wapbl_write_commit.  This is ok since this routine
 * is only invoked from wapbl_flush
 */
static int
wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
{
	struct wapbl_wc_header *wc = wl->wl_wc_header;
	struct timespec ts;
	int error;
	int force = 1;

	/* XXX Calc checksum here, instead we do this for now */
	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	wc->wc_head = head;
	wc->wc_tail = tail;
	wc->wc_checksum = 0;
	wc->wc_version = 1;
	getnanotime(&ts);
	wc->wc_time = ts.tv_sec;
	wc->wc_timensec = ts.tv_nsec;

	WAPBL_PRINTF(WAPBL_PRINT_WRITE,
	    ("wapbl_write_commit: head = %"PRIdMAX" tail = %"PRIdMAX"\n",
	    (intmax_t)head, (intmax_t)tail));

	/*
	 * XXX if generation will rollover, then first zero
	 * over second commit header before trying to write both headers.
	 */

	error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
	    wl->wl_logpbn + wc->wc_generation % 2);
	if (error)
		return error;

	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	/*
	 * If the generation number was zero, write it out a second time.
	 * This handles initialization and generation number rollover
	 */
	if (wc->wc_generation++ == 0) {
		error = wapbl_write_commit(wl, head, tail);
		/*
		 * This panic should be able to be removed if we do the
		 * zero'ing mentioned above, and we are certain to roll
		 * back generation number on failure.
		 */
		if (error)
			panic("wapbl_write_commit: error writing duplicate "
			    "log header: %d\n", error);
	}
	return 0;
}
1891
1892 /* Returns new offset value */
1893 static int
1894 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
1895 {
1896 struct wapbl_wc_blocklist *wc =
1897 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1898 int blocklen = 1<<wl->wl_log_dev_bshift;
1899 int bph;
1900 struct buf *bp;
1901 off_t off = *offp;
1902 int error;
1903 size_t padding;
1904
1905 KASSERT(rw_write_held(&wl->wl_rwlock));
1906
1907 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1908 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1909
1910 bp = LIST_FIRST(&wl->wl_bufs);
1911
1912 while (bp) {
1913 int cnt;
1914 struct buf *obp = bp;
1915
1916 KASSERT(bp->b_flags & B_LOCKED);
1917
1918 wc->wc_type = WAPBL_WC_BLOCKS;
1919 wc->wc_len = blocklen;
1920 wc->wc_blkcount = 0;
1921 while (bp && (wc->wc_blkcount < bph)) {
1922 /*
1923 * Make sure all the physical block numbers are up to
1924 * date. If this is not always true on a given
1925 * filesystem, then VOP_BMAP must be called. We
1926 * could call VOP_BMAP here, or else in the filesystem
1927 * specific flush callback, although neither of those
1928 * solutions allow us to take the vnode lock. If a
1929 * filesystem requires that we must take the vnode lock
1930 * to call VOP_BMAP, then we can probably do it in
1931 * bwrite when the vnode lock should already be held
1932 * by the invoking code.
1933 */
1934 KASSERT((bp->b_vp->v_type == VBLK) ||
1935 (bp->b_blkno != bp->b_lblkno));
1936 KASSERT(bp->b_blkno > 0);
1937
1938 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
1939 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
1940 wc->wc_len += bp->b_bcount;
1941 wc->wc_blkcount++;
1942 bp = LIST_NEXT(bp, b_wapbllist);
1943 }
1944 if (wc->wc_len % blocklen != 0) {
1945 padding = blocklen - wc->wc_len % blocklen;
1946 wc->wc_len += padding;
1947 } else {
1948 padding = 0;
1949 }
1950
1951 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1952 ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
1953 wc->wc_len, padding, (intmax_t)off));
1954
1955 error = wapbl_circ_write(wl, wc, blocklen, &off);
1956 if (error)
1957 return error;
1958 bp = obp;
1959 cnt = 0;
1960 while (bp && (cnt++ < bph)) {
1961 error = wapbl_circ_write(wl, bp->b_data,
1962 bp->b_bcount, &off);
1963 if (error)
1964 return error;
1965 bp = LIST_NEXT(bp, b_wapbllist);
1966 }
1967 if (padding) {
1968 void *zero;
1969
1970 zero = wapbl_malloc(padding);
1971 memset(zero, 0, padding);
1972 error = wapbl_circ_write(wl, zero, padding, &off);
1973 wapbl_free(zero, padding);
1974 if (error)
1975 return error;
1976 }
1977 }
1978 *offp = off;
1979 return 0;
1980 }
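
/*
 * Resulting log record layout, for illustration:
 *
 *	[WAPBL_WC_BLOCKS header][buf0 data][buf1 data]...[zero padding]
 *
 * wc_len counts the header block, the buffer data, and the padding,
 * so replay can step from record to record by advancing exactly
 * wc_len bytes (cf. wapbl_replay_process).
 */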
1981
1982 static int
1983 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
1984 {
1985 struct wapbl_wc_blocklist *wc =
1986 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1987 int i;
1988 int blocklen = 1<<wl->wl_log_dev_bshift;
1989 int bph;
1990 off_t off = *offp;
1991 int error;
1992
1993 if (wl->wl_dealloccnt == 0)
1994 return 0;
1995
1996 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1997 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1998
1999 i = 0;
2000 while (i < wl->wl_dealloccnt) {
2001 wc->wc_type = WAPBL_WC_REVOCATIONS;
2002 wc->wc_len = blocklen;
2003 wc->wc_blkcount = 0;
2004 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
2005 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2006 wl->wl_deallocblks[i];
2007 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2008 wl->wl_dealloclens[i];
2009 wc->wc_blkcount++;
2010 i++;
2011 }
2012 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2013 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2014 wc->wc_len, (intmax_t)off));
2015 error = wapbl_circ_write(wl, wc, blocklen, &off);
2016 if (error)
2017 return error;
2018 }
2019 *offp = off;
2020 return 0;
2021 }
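
/*
 * Revocations exist so that replay does not resurrect blocks freed
 * later in the transaction: wapbl_replay_process_revocations removes
 * each revoked block from the replay hashtable, cancelling any
 * earlier WAPBL_WC_BLOCKS entry for the same block.
 */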
2022
2023 static int
2024 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2025 {
2026 struct wapbl_wc_inodelist *wc =
2027 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2028 int i;
2029 int blocklen = 1 << wl->wl_log_dev_bshift;
2030 off_t off = *offp;
2031 int error;
2032
2033 struct wapbl_ino_head *wih;
2034 struct wapbl_ino *wi;
2035 int iph;
2036
2037 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2038 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2039
2040 i = 0;
2041 wih = &wl->wl_inohash[0];
	wi = NULL;
2043 do {
2044 wc->wc_type = WAPBL_WC_INODES;
2045 wc->wc_len = blocklen;
2046 wc->wc_inocnt = 0;
2047 wc->wc_clear = (i == 0);
2048 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2049 while (!wi) {
2050 KASSERT((wih - &wl->wl_inohash[0])
2051 <= wl->wl_inohashmask);
2052 wi = LIST_FIRST(wih++);
2053 }
2054 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2055 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2056 wc->wc_inocnt++;
2057 i++;
2058 wi = LIST_NEXT(wi, wi_hash);
2059 }
2060 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2061 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2062 wc->wc_len, (intmax_t)off));
2063 error = wapbl_circ_write(wl, wc, blocklen, &off);
2064 if (error)
2065 return error;
2066 } while (i < wl->wl_inohashcnt);
2067
2068 *offp = off;
2069 return 0;
2070 }
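
/*
 * The first inode record of a transaction is written with wc_clear
 * set, telling replay to discard the inode list accumulated from
 * older records before appending; later records in the same pass
 * append with wc_clear zero.  For example (hypothetical counts), 100
 * tracked inodes with iph = 60 yield two records: 60 entries with
 * wc_clear = 1, then 40 entries with wc_clear = 0.
 */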
2071
2072 #endif /* _KERNEL */
2073
2074 /****************************************************************/
2075
2076 struct wapbl_blk {
2077 LIST_ENTRY(wapbl_blk) wb_hash;
2078 daddr_t wb_blk;
2079 off_t wb_off; /* Offset of this block in the log */
2080 };
2081 #define WAPBL_BLKPOOL_MIN 83
2082
2083 static void
2084 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2085 {
2086 if (size < WAPBL_BLKPOOL_MIN)
2087 size = WAPBL_BLKPOOL_MIN;
	KASSERT(wr->wr_blkhash == NULL);
2089 #ifdef _KERNEL
2090 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2091 #else /* ! _KERNEL */
	/* Manually implement hashinit */
	{
		unsigned long i, hashsize;
		for (hashsize = 1; hashsize < size; hashsize <<= 1)
			continue;
		wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
		/*
		 * Initialize all hashsize buckets; wr_blkhashmask is
		 * not valid until after this loop, so it cannot be
		 * used as the loop bound.
		 */
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&wr->wr_blkhash[i]);
		wr->wr_blkhashmask = hashsize - 1;
	}
2103 #endif /* ! _KERNEL */
2104 }
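
/*
 * Sizing example, for illustration: a requested size of 100 rounds up
 * to the next power of two, 128, so wr_blkhashmask becomes 127 and a
 * block lands in bucket (blk & 127); see wapbl_blkhash_get().
 */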
2105
2106 static void
2107 wapbl_blkhash_free(struct wapbl_replay *wr)
2108 {
2109 KASSERT(wr->wr_blkhashcnt == 0);
2110 #ifdef _KERNEL
2111 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2112 #else /* ! _KERNEL */
2113 wapbl_free(wr->wr_blkhash,
2114 (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
2115 #endif /* ! _KERNEL */
2116 }
2117
2118 static struct wapbl_blk *
2119 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2120 {
2121 struct wapbl_blk_head *wbh;
2122 struct wapbl_blk *wb;
2123 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2124 LIST_FOREACH(wb, wbh, wb_hash) {
2125 if (blk == wb->wb_blk)
2126 return wb;
2127 }
	return NULL;
2129 }
2130
2131 static void
2132 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2133 {
2134 struct wapbl_blk_head *wbh;
2135 struct wapbl_blk *wb;
2136 wb = wapbl_blkhash_get(wr, blk);
2137 if (wb) {
2138 KASSERT(wb->wb_blk == blk);
2139 wb->wb_off = off;
2140 } else {
2141 wb = wapbl_malloc(sizeof(*wb));
2142 wb->wb_blk = blk;
2143 wb->wb_off = off;
2144 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2145 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2146 wr->wr_blkhashcnt++;
2147 }
2148 }
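
/*
 * Note that inserting a block that is already present only updates
 * wb_off, so after the tail-to-head scan in wapbl_replay_process the
 * hashtable maps each block to its most recently logged copy, which
 * is the version replay must write out.
 */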
2149
2150 static void
2151 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2152 {
2153 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2154 if (wb) {
2155 KASSERT(wr->wr_blkhashcnt > 0);
2156 wr->wr_blkhashcnt--;
2157 LIST_REMOVE(wb, wb_hash);
2158 wapbl_free(wb, sizeof(*wb));
2159 }
2160 }
2161
2162 static void
2163 wapbl_blkhash_clear(struct wapbl_replay *wr)
2164 {
2165 int i;
2166 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2167 struct wapbl_blk *wb;
2168
2169 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2170 KASSERT(wr->wr_blkhashcnt > 0);
2171 wr->wr_blkhashcnt--;
2172 LIST_REMOVE(wb, wb_hash);
2173 wapbl_free(wb, sizeof(*wb));
2174 }
2175 }
2176 KASSERT(wr->wr_blkhashcnt == 0);
2177 }
2178
2179 /****************************************************************/
2180
2181 static int
2182 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2183 {
2184 size_t slen;
2185 off_t off = *offp;
2186 int error;
2187
2188 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2189 wr->wr_log_dev_bshift) == len);
2190 if (off < wr->wr_circ_off)
2191 off = wr->wr_circ_off;
2192 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2193 if (slen < len) {
2194 error = wapbl_read(data, slen, wr->wr_devvp,
2195 wr->wr_logpbn + (off >> wr->wr_log_dev_bshift));
2196 if (error)
2197 return error;
2198 data = (uint8_t *)data + slen;
2199 len -= slen;
2200 off = wr->wr_circ_off;
2201 }
2202 error = wapbl_read(data, len, wr->wr_devvp,
2203 wr->wr_logpbn + (off >> wr->wr_log_dev_bshift));
2204 if (error)
2205 return error;
2206 off += len;
2207 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2208 off = wr->wr_circ_off;
2209 *offp = off;
2210 return 0;
2211 }
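
/*
 * Wraparound example (hypothetical offsets): with wr_circ_off = 1024
 * and wr_circ_size = 8192 the data area spans [1024, 9216).  Reading
 * len = 1024 at off = 8704 gives slen = 1024 + 8192 - 8704 = 512, so
 * 512 bytes are read at 8704 and the remaining 512 at 1024, leaving
 * *offp at 1536.
 */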
2212
2213 static void
2214 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2215 {
2216 size_t slen;
2217 off_t off = *offp;
2218
2219 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2220 wr->wr_log_dev_bshift) == len);
2221
2222 if (off < wr->wr_circ_off)
2223 off = wr->wr_circ_off;
2224 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2225 if (slen < len) {
2226 len -= slen;
2227 off = wr->wr_circ_off;
2228 }
2229 off += len;
2230 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2231 off = wr->wr_circ_off;
2232 *offp = off;
2233 }
2234
2235 /****************************************************************/
2236
2237 int
2238 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2239 daddr_t off, size_t count, size_t blksize)
2240 {
2241 struct wapbl_replay *wr;
2242 int error;
2243 struct vnode *devvp;
2244 daddr_t logpbn;
2245 uint8_t *scratch;
2246 struct wapbl_wc_header *wch;
2247 struct wapbl_wc_header *wch2;
2248 /* Use this until we read the actual log header */
2249 int log_dev_bshift = DEV_BSHIFT;
2250 size_t used;
2251
2252 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2253 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2254 vp, off, count, blksize));
2255
2256 if (off < 0)
2257 return EINVAL;
2258
2259 if (blksize < DEV_BSIZE)
2260 return EINVAL;
2261 if (blksize % DEV_BSIZE)
2262 return EINVAL;
2263
2264 #ifdef _KERNEL
2265 #if 0
	/*
	 * XXX vp->v_size isn't reliably set for VBLK devices,
	 * especially root.  However, we might still want to verify
	 * that the full load is readable.
	 */
2269 if ((off + count) * blksize > vp->v_size)
2270 return EINVAL;
2271 #endif
2272
2273 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2274 return error;
2275 }
2276 #else /* ! _KERNEL */
2277 devvp = vp;
2278 logpbn = off;
2279 #endif /* ! _KERNEL */
2280
2281 scratch = wapbl_malloc(MAXBSIZE);
2282
2283 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
2284 if (error)
2285 goto errout;
2286
2287 wch = (struct wapbl_wc_header *)scratch;
2288 wch2 =
2289 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2290 /* XXX verify checksums and magic numbers */
2291 if (wch->wc_type != WAPBL_WC_HEADER) {
2292 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2293 error = EFTYPE;
2294 goto errout;
2295 }
2296
2297 if (wch2->wc_generation > wch->wc_generation)
2298 wch = wch2;
2299
2300 wr = wapbl_calloc(1, sizeof(*wr));
2301
2302 wr->wr_logvp = vp;
2303 wr->wr_devvp = devvp;
2304 wr->wr_logpbn = logpbn;
2305
2306 wr->wr_scratch = scratch;
2307
2308 wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
2309 wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
2310 wr->wr_circ_off = wch->wc_circ_off;
2311 wr->wr_circ_size = wch->wc_circ_size;
2312 wr->wr_generation = wch->wc_generation;
2313
2314 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2315
2316 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2317 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2318 " len=%"PRId64" used=%zu\n",
2319 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2320 wch->wc_circ_size, used));
2321
2322 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2323
2324 error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
2325 if (error) {
2326 wapbl_replay_stop(wr);
2327 wapbl_replay_free(wr);
2328 return error;
2329 }
2330
2331 *wrp = wr;
2332 return 0;
2333
2334 errout:
2335 wapbl_free(scratch, MAXBSIZE);
2336 return error;
2337 }
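
/*
 * Sketch of a typical caller, under the assumption of a journaling
 * filesystem's mount path: wapbl_replay_start() scans the log and
 * builds the block hashtable; the caller may then overlay pending
 * blocks on reads with wapbl_replay_read()/wapbl_replay_can_read(),
 * or commit them with wapbl_replay_write() and finish with
 * wapbl_replay_stop() and wapbl_replay_free().
 */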
2338
2339 void
2340 wapbl_replay_stop(struct wapbl_replay *wr)
2341 {
2342
2343 if (!wapbl_replay_isopen(wr))
2344 return;
2345
2346 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2347
2348 wapbl_free(wr->wr_scratch, MAXBSIZE);
2349 wr->wr_scratch = NULL;
2350
2351 wr->wr_logvp = NULL;
2352
2353 wapbl_blkhash_clear(wr);
2354 wapbl_blkhash_free(wr);
2355 }
2356
2357 void
2358 wapbl_replay_free(struct wapbl_replay *wr)
2359 {
2360
2361 KDASSERT(!wapbl_replay_isopen(wr));
2362
2363 if (wr->wr_inodes)
2364 wapbl_free(wr->wr_inodes,
2365 wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
2366 wapbl_free(wr, sizeof(*wr));
2367 }
2368
2369 #ifdef _KERNEL
2370 int
2371 wapbl_replay_isopen1(struct wapbl_replay *wr)
2372 {
2373
2374 return wapbl_replay_isopen(wr);
2375 }
2376 #endif
2377
2378 static void
2379 wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
2380 {
2381 struct wapbl_wc_blocklist *wc =
2382 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2383 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2384 int i, j, n;
2385
2386 for (i = 0; i < wc->wc_blkcount; i++) {
2387 /*
2388 * Enter each physical block into the hashtable independently.
2389 */
2390 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2391 for (j = 0; j < n; j++) {
2392 wapbl_blkhash_ins(wr, wc->wc_blocks[i].wc_daddr + j,
2393 *offp);
2394 wapbl_circ_advance(wr, fsblklen, offp);
2395 }
2396 }
2397 }
2398
2399 static void
2400 wapbl_replay_process_revocations(struct wapbl_replay *wr)
2401 {
2402 struct wapbl_wc_blocklist *wc =
2403 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2404 int i, j, n;
2405
2406 for (i = 0; i < wc->wc_blkcount; i++) {
2407 /*
2408 * Remove any blocks found from the hashtable.
2409 */
2410 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2411 for (j = 0; j < n; j++)
2412 wapbl_blkhash_rem(wr, wc->wc_blocks[i].wc_daddr + j);
2413 }
2414 }
2415
2416 static void
2417 wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
2418 {
2419 struct wapbl_wc_inodelist *wc =
2420 (struct wapbl_wc_inodelist *)wr->wr_scratch;
2421 void *new_inodes;
2422 const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
2423
2424 KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
2425
	/*
	 * Keep track of where we found this inode list so that its
	 * location in the log will not be overwritten.
	 */
2430 if (wc->wc_clear) {
2431 wr->wr_inodestail = oldoff;
2432 wr->wr_inodescnt = 0;
2433 if (wr->wr_inodes != NULL) {
2434 wapbl_free(wr->wr_inodes, oldsize);
2435 wr->wr_inodes = NULL;
2436 }
2437 }
2438 wr->wr_inodeshead = newoff;
2439 if (wc->wc_inocnt == 0)
2440 return;
2441
2442 new_inodes = wapbl_malloc((wr->wr_inodescnt + wc->wc_inocnt) *
2443 sizeof(wr->wr_inodes[0]));
2444 if (wr->wr_inodes != NULL) {
2445 memcpy(new_inodes, wr->wr_inodes, oldsize);
2446 wapbl_free(wr->wr_inodes, oldsize);
2447 }
2448 wr->wr_inodes = new_inodes;
2449 memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
2450 wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
2451 wr->wr_inodescnt += wc->wc_inocnt;
2452 }
2453
2454 static int
2455 wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
2456 {
2457 off_t off;
2458 int error;
2459
2460 int logblklen = 1 << wr->wr_log_dev_bshift;
2461
2462 wapbl_blkhash_clear(wr);
2463
2464 off = tail;
2465 while (off != head) {
2466 struct wapbl_wc_null *wcn;
2467 off_t saveoff = off;
2468 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2469 if (error)
2470 goto errout;
2471 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2472 switch (wcn->wc_type) {
2473 case WAPBL_WC_BLOCKS:
2474 wapbl_replay_process_blocks(wr, &off);
2475 break;
2476
2477 case WAPBL_WC_REVOCATIONS:
2478 wapbl_replay_process_revocations(wr);
2479 break;
2480
2481 case WAPBL_WC_INODES:
2482 wapbl_replay_process_inodes(wr, saveoff, off);
2483 break;
2484
2485 default:
2486 printf("Unrecognized wapbl type: 0x%08x\n",
2487 wcn->wc_type);
2488 error = EFTYPE;
2489 goto errout;
2490 }
2491 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2492 if (off != saveoff) {
2493 printf("wapbl_replay: corrupted records\n");
2494 error = EFTYPE;
2495 goto errout;
2496 }
2497 }
2498 return 0;
2499
2500 errout:
2501 wapbl_blkhash_clear(wr);
2502 return error;
2503 }
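
/*
 * Record-walk invariant, for illustration: each record starts with a
 * wapbl_wc_null header whose wc_len spans the entire record, header
 * block included.  Advancing the saved offset by wc_len must land
 * exactly where the per-type handler stopped; a mismatch means the
 * recorded lengths are inconsistent and replay fails with EFTYPE.
 */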
2504
2505 #if 0
2506 int
2507 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2508 {
2509 off_t off;
2510 int mismatchcnt = 0;
2511 int logblklen = 1 << wr->wr_log_dev_bshift;
2512 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2513 void *scratch1 = wapbl_malloc(MAXBSIZE);
2514 void *scratch2 = wapbl_malloc(MAXBSIZE);
2515 int error = 0;
2516
2517 KDASSERT(wapbl_replay_isopen(wr));
2518
2519 off = wch->wc_tail;
2520 while (off != wch->wc_head) {
2521 struct wapbl_wc_null *wcn;
2522 #ifdef DEBUG
2523 off_t saveoff = off;
2524 #endif
2525 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2526 if (error)
2527 goto out;
2528 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2529 switch (wcn->wc_type) {
2530 case WAPBL_WC_BLOCKS:
2531 {
2532 struct wapbl_wc_blocklist *wc =
2533 (struct wapbl_wc_blocklist *)wr->wr_scratch;
2534 int i;
2535 for (i = 0; i < wc->wc_blkcount; i++) {
2536 int foundcnt = 0;
2537 int dirtycnt = 0;
2538 int j, n;
				/*
				 * Check each physical block against
				 * the hashtable independently.
				 */
				n = wc->wc_blocks[i].wc_dlen >>
				    wr->wr_fs_dev_bshift;
2545 for (j = 0; j < n; j++) {
2546 struct wapbl_blk *wb =
2547 wapbl_blkhash_get(wr,
2548 wc->wc_blocks[i].wc_daddr + j);
2549 if (wb && (wb->wb_off == off)) {
2550 foundcnt++;
2551 error =
2552 wapbl_circ_read(wr,
2553 scratch1, fsblklen,
2554 &off);
2555 if (error)
2556 goto out;
2557 error =
2558 wapbl_read(scratch2,
2559 fsblklen, fsdevvp,
2560 wb->wb_blk);
2561 if (error)
2562 goto out;
2563 if (memcmp(scratch1,
2564 scratch2,
2565 fsblklen)) {
2566 printf(
2567 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2568 wb->wb_blk, (intmax_t)off);
2569 dirtycnt++;
2570 mismatchcnt++;
2571 }
2572 } else {
2573 wapbl_circ_advance(wr,
2574 fsblklen, &off);
2575 }
2576 }
2577 #if 0
2578 /*
2579 * If all of the blocks in an entry
2580 * are clean, then remove all of its
2581 * blocks from the hashtable since they
2582 * never will need replay.
2583 */
2584 if ((foundcnt != 0) &&
2585 (dirtycnt == 0)) {
2586 off = saveoff;
2587 wapbl_circ_advance(wr,
2588 logblklen, &off);
2589 for (j = 0; j < n; j++) {
2590 struct wapbl_blk *wb =
2591 wapbl_blkhash_get(wr,
2592 wc->wc_blocks[i].wc_daddr + j);
2593 if (wb &&
2594 (wb->wb_off == off)) {
2595 wapbl_blkhash_rem(wr, wb->wb_blk);
2596 }
2597 wapbl_circ_advance(wr,
2598 fsblklen, &off);
2599 }
2600 }
2601 #endif
2602 }
2603 }
2604 break;
2605 case WAPBL_WC_REVOCATIONS:
2606 case WAPBL_WC_INODES:
2607 break;
2608 default:
2609 KASSERT(0);
2610 }
2611 #ifdef DEBUG
2612 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2613 KASSERT(off == saveoff);
2614 #endif
2615 }
2616 out:
2617 wapbl_free(scratch1, MAXBSIZE);
2618 wapbl_free(scratch2, MAXBSIZE);
2619 if (!error && mismatchcnt)
2620 error = EFTYPE;
2621 return error;
2622 }
2623 #endif
2624
2625 int
2626 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2627 {
2628 struct wapbl_blk *wb;
2629 size_t i;
2630 off_t off;
2631 void *scratch;
2632 int error = 0;
2633 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2634
2635 KDASSERT(wapbl_replay_isopen(wr));
2636
2637 scratch = wapbl_malloc(MAXBSIZE);
2638
	/* Buckets run from 0 through wr_blkhashmask inclusive. */
	for (i = 0; i <= wr->wr_blkhashmask; ++i) {
2640 LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
2641 off = wb->wb_off;
2642 error = wapbl_circ_read(wr, scratch, fsblklen, &off);
2643 if (error)
2644 break;
2645 error = wapbl_write(scratch, fsblklen, fsdevvp,
2646 wb->wb_blk);
2647 if (error)
2648 break;
2649 }
2650 }
2651
2652 wapbl_free(scratch, MAXBSIZE);
2653 return error;
2654 }
2655
2656 int
2657 wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
2658 {
2659 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2660
2661 KDASSERT(wapbl_replay_isopen(wr));
2662 KASSERT((len % fsblklen) == 0);
2663
	while (len != 0) {
		struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
		if (wb)
			return 1;
		len -= fsblklen;
		blk++;
	}
2670 return 0;
2671 }
2672
2673 int
2674 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2675 {
2676 int fsblklen = 1 << wr->wr_fs_dev_bshift;
2677
2678 KDASSERT(wapbl_replay_isopen(wr));
2679
2680 KASSERT((len % fsblklen) == 0);
2681
2682 while (len != 0) {
2683 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2684 if (wb) {
2685 off_t off = wb->wb_off;
2686 int error;
2687 error = wapbl_circ_read(wr, data, fsblklen, &off);
2688 if (error)
2689 return error;
2690 }
2691 data = (uint8_t *)data + fsblklen;
2692 len -= fsblklen;
2693 blk++;
2694 }
2695 return 0;
2696 }
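
/*
 * Note for callers: blocks with no entry in the replay hashtable are
 * skipped rather than zeroed, so `data' is expected to already hold
 * the on-disk contents, which this routine overlays with the logged
 * versions (cf. wapbl_replay_can_read).
 */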
2697