/*	$NetBSD: vfs_wapbl.c,v 1.105 2020/03/14 15:32:51 ad Exp $	*/

/*-
 * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements file system independent write ahead filesystem logging.
 */

#define WAPBL_INTERNAL

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.105 2020/03/14 15:32:51 ad Exp $");

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/time.h>
#include <sys/wapbl.h>
#include <sys/wapbl_replay.h>

#ifdef _KERNEL

#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/evcnt.h>
#include <sys/file.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <miscfs/specfs/specdev.h>

#define wapbl_alloc(s) kmem_alloc((s), KM_SLEEP)
#define wapbl_free(a, s) kmem_free((a), (s))
#define wapbl_calloc(n, s) kmem_zalloc((n)*(s), KM_SLEEP)

static struct sysctllog *wapbl_sysctl;
static int wapbl_flush_disk_cache = 1;
static int wapbl_verbose_commit = 0;
static int wapbl_allow_dpofua = 0;	/* switched off by default for now */
static int wapbl_journal_iobufs = 4;

static inline size_t wapbl_space_free(size_t, off_t, off_t);

#else /* !_KERNEL */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KDASSERT(x) assert(x)
#define KASSERT(x) assert(x)
#define wapbl_alloc(s) malloc(s)
#define wapbl_free(a, s) free(a)
#define wapbl_calloc(n, s) calloc((n), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		lm = rwlock held writing or mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
LIST_HEAD(wapbl_ino_head, wapbl_ino);
struct wapbl {
	struct vnode *wl_logvp;	/* r:	log here */
	struct vnode *wl_devvp;	/* r:	log on this device */
	struct mount *wl_mount;	/* r:	mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r:	Physical block number of start of log */
	int wl_log_dev_bshift;	/* r:	logarithm of device block size of log
					device */
	int wl_fs_dev_bshift;	/* r:	logarithm of device block size of
					filesystem device */

	unsigned wl_lock_count;	/* m:	Count of transactions in progress */

	size_t wl_circ_size;	/* r:	Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r:	Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r:	Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r:	Number of buf bytes reserved for log */

	off_t wl_head;		/* l:	Byte offset of log head */
	off_t wl_tail;		/* l:	Byte offset of log tail */
	/*
	 * WAPBL log layout, stored on wl_devvp at wl_logpbn:
	 *
	 *  ___________________ wl_circ_size __________________
	 * /                                                   \
	 * +---------+---------+-------+--------------+--------+
	 * [ commit0 | commit1 | CCWCW | EEEEEEEEEEEE | CCCWCW ]
	 * +---------+---------+-------+--------------+--------+
	 *       wl_circ_off --^       ^-- wl_head    ^-- wl_tail
	 *
	 * commit0 and commit1 are commit headers.  A commit header has
	 * a generation number, indicating which of the two headers is
	 * more recent, and an assignment of head and tail pointers.
	 * The rest is a circular queue of log records, starting at
	 * the byte offset wl_circ_off.
	 *
	 * E marks empty space for records.
	 * W marks records for block writes issued but waiting.
	 * C marks completed records.
	 *
	 * wapbl_flush writes new records to empty `E' spaces after
	 * wl_head from the current transaction in memory.
	 *
	 * wapbl_truncate advances wl_tail past any completed `C'
	 * records, freeing them up for use.
	 *
	 * head == tail == 0 means log is empty.
	 * head == tail != 0 means log is full.
	 *
	 * See assertions in wapbl_advance() for other boundary
	 * conditions.
	 *
	 * Only wapbl_flush moves the head, except when wapbl_truncate
	 * sets it to 0 to indicate that the log is empty.
	 *
	 * Only wapbl_truncate moves the tail, except when wapbl_flush
	 * sets it to wl_circ_off to indicate that the log is full.
	 */

	struct wapbl_wc_header *wl_wc_header;	/* l */
	void *wl_wc_scratch;	/* l:	scratch space (XXX: why?!?) */

	kmutex_t wl_mtx;	/* u:	short-term lock */
	krwlock_t wl_rwlock;	/* u:	File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

#ifdef _KERNEL
	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
	wapbl_flush_fn_t wl_flush;	/* r */
	wapbl_flush_fn_t wl_flush_abort;/* r */

	/* Event counters */
	char wl_ev_group[EVCNT_STRING_MAX];	/* r */
	struct evcnt wl_ev_commit;		/* l */
	struct evcnt wl_ev_journalwrite;	/* l */
	struct evcnt wl_ev_jbufs_bio_nowait;	/* l */
	struct evcnt wl_ev_metawrite;		/* lm */
	struct evcnt wl_ev_cacheflush;		/* l */
#endif

	size_t wl_bufbytes;	/* m:	Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m:	Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m:	Total bcount of wl_bufs */

	TAILQ_HEAD(, buf) wl_bufs;	/* m:	Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes;	/* m:	Amount of space available for
						reclamation by truncate */
	int wl_error_count;	/* m:	# of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes;	/* Byte count of unsynced buffers */
#endif

#ifdef _KERNEL
	int wl_brperjblock;	/* r:	Block records per journal block */
#endif

	TAILQ_HEAD(, wapbl_dealloc) wl_dealloclist;	/* lm:	list head */
	int wl_dealloccnt;				/* lm:	total count */
	int wl_dealloclim;				/* r:	max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	struct wapbl_ino_head *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries;	/* On disk transaction
						   accounting */

	/* buffers for wapbl_buffered_write() */
	TAILQ_HEAD(, buf) wl_iobufs;		/* l: Free or filling bufs */
	TAILQ_HEAD(, buf) wl_iobufs_busy;	/* l: In-transit bufs */

	int wl_dkcache;		/* r:	disk cache flags */
#define WAPBL_USE_FUA(wl)	\
		(wapbl_allow_dpofua && ISSET((wl)->wl_dkcache, DKCACHE_FUA))
#define WAPBL_JFLAGS(wl)	\
		(WAPBL_USE_FUA(wl) ? (wl)->wl_jwrite_flags : 0)
#define WAPBL_JDATA_FLAGS(wl)	\
		(WAPBL_JFLAGS(wl) & B_MEDIA_DPO)	/* only DPO */
	int wl_jwrite_flags;	/* r:	journal write flags */
};

#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
#endif /* _KERNEL */

static int wapbl_replay_process(struct wapbl_replay *wr, off_t, off_t);

static inline size_t wapbl_space_used(size_t avail, off_t head,
	off_t tail);

#ifdef _KERNEL

static struct pool wapbl_entry_pool;
static struct pool wapbl_dealloc_pool;

#define WAPBL_INODETRK_SIZE 83
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

static void wapbl_deallocation_free(struct wapbl *, struct wapbl_dealloc *,
	bool);

static void wapbl_evcnt_init(struct wapbl *);
static void wapbl_evcnt_free(struct wapbl *);

static void wapbl_dkcache_init(struct wapbl *);

#if 0
int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
#endif

static int wapbl_replay_isopen1(struct wapbl_replay *);

const struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_can_read = wapbl_replay_can_read,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,
	.wo_wapbl_jlock_assert	= wapbl_jlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};

static int
wapbl_sysctl_init(void)
{
	int rv;
	const struct sysctlnode *rnode, *cnode;

	wapbl_sysctl = NULL;

	rv = sysctl_createv(&wapbl_sysctl, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "wapbl",
	    SYSCTL_DESCR("WAPBL journaling options"),
	    NULL, 0, NULL, 0,
	    CTL_VFS, CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "flush_disk_cache",
	    SYSCTL_DESCR("flush disk cache"),
	    NULL, 0, &wapbl_flush_disk_cache, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "verbose_commit",
	    SYSCTL_DESCR("show time and size of wapbl log commits"),
	    NULL, 0, &wapbl_verbose_commit, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "allow_dpofua",
	    SYSCTL_DESCR("allow use of FUA/DPO instead of cache flush if available"),
	    NULL, 0, &wapbl_allow_dpofua, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "journal_iobufs",
	    SYSCTL_DESCR("count of bufs used for journal I/O (max async count)"),
	    NULL, 0, &wapbl_journal_iobufs, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	return rv;
}

static void
wapbl_init(void)
{

	pool_init(&wapbl_entry_pool, sizeof(struct wapbl_entry), 0, 0, 0,
	    "wapblentrypl", &pool_allocator_kmem, IPL_VM);
	pool_init(&wapbl_dealloc_pool, sizeof(struct wapbl_dealloc), 0, 0, 0,
	    "wapbldealloc", &pool_allocator_nointr, IPL_NONE);

	wapbl_sysctl_init();
}

static int
wapbl_fini(void)
{

	if (wapbl_sysctl != NULL)
		sysctl_teardown(&wapbl_sysctl);

	pool_destroy(&wapbl_dealloc_pool);
	pool_destroy(&wapbl_entry_pool);

	return 0;
}

static void
wapbl_evcnt_init(struct wapbl *wl)
{
	snprintf(wl->wl_ev_group, sizeof(wl->wl_ev_group),
	    "wapbl fsid 0x%x/0x%x",
	    wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[0],
	    wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[1]
	);

	evcnt_attach_dynamic(&wl->wl_ev_commit, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "commit");
	evcnt_attach_dynamic(&wl->wl_ev_journalwrite, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "journal write total");
	evcnt_attach_dynamic(&wl->wl_ev_jbufs_bio_nowait, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "journal write finished async");
	evcnt_attach_dynamic(&wl->wl_ev_metawrite, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "metadata async write");
	evcnt_attach_dynamic(&wl->wl_ev_cacheflush, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "cache flush");
}

static void
wapbl_evcnt_free(struct wapbl *wl)
{
	evcnt_detach(&wl->wl_ev_commit);
	evcnt_detach(&wl->wl_ev_journalwrite);
	evcnt_detach(&wl->wl_ev_jbufs_bio_nowait);
	evcnt_detach(&wl->wl_ev_metawrite);
	evcnt_detach(&wl->wl_ev_cacheflush);
}

static void
wapbl_dkcache_init(struct wapbl *wl)
{
	int error;

	/* Get disk cache flags */
	error = VOP_IOCTL(wl->wl_devvp, DIOCGCACHE, &wl->wl_dkcache,
	    FWRITE, FSCRED);
	if (error) {
		/* behave as if there was a write cache */
		wl->wl_dkcache = DKCACHE_WRITE;
	}

	/* Use FUA instead of cache flush if available */
	if (ISSET(wl->wl_dkcache, DKCACHE_FUA))
		wl->wl_jwrite_flags |= B_MEDIA_FUA;

	/* Use DPO for journal writes if available */
	if (ISSET(wl->wl_dkcache, DKCACHE_DPO))
		wl->wl_jwrite_flags |= B_MEDIA_DPO;
}
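
#if 0
/*
 * Illustrative sketch (never compiled): how the policy above combines
 * with the WAPBL_JFLAGS/WAPBL_JDATA_FLAGS macros.  Assumes a device
 * reporting both DKCACHE_FUA and DKCACHE_DPO, and the (default-off)
 * vfs.wapbl.allow_dpofua sysctl switched on.
 */
static void
wapbl_dkcache_example(struct wapbl *wl)
{
	wl->wl_dkcache = DKCACHE_FUA | DKCACHE_DPO;
	wl->wl_jwrite_flags = B_MEDIA_FUA | B_MEDIA_DPO;
	/* Commit writes force unit access and bypass the write cache... */
	KASSERT(WAPBL_JFLAGS(wl) == (B_MEDIA_FUA | B_MEDIA_DPO));
	/* ...while journal data only asks the cache not to retain it. */
	KASSERT(WAPBL_JDATA_FLAGS(wl) == B_MEDIA_DPO);
}
#endif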

static int
wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
{
	int error, i;

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));

	/*
	 * It's only valid to reuse the replay log if it's
	 * the same as the new log we just opened.
	 */
	KDASSERT(!wapbl_replay_isopen(wr));
	KASSERT(wl->wl_devvp->v_type == VBLK);
	KASSERT(wr->wr_devvp->v_type == VBLK);
	KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
	KASSERT(wl->wl_logpbn == wr->wr_logpbn);
	KASSERT(wl->wl_circ_size == wr->wr_circ_size);
	KASSERT(wl->wl_circ_off == wr->wr_circ_off);
	KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
	KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);

	wl->wl_wc_header->wc_generation = wr->wr_generation + 1;

	for (i = 0; i < wr->wr_inodescnt; i++)
		wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
		    wr->wr_inodes[i].wr_imode);

	/* Make sure new transaction won't overwrite old inodes list */
	KDASSERT(wapbl_transaction_len(wl) <=
	    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
	    wr->wr_inodestail));

	wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
	wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
	    wapbl_transaction_len(wl);

	error = wapbl_write_inodes(wl, &wl->wl_head);
	if (error)
		return error;

	KASSERT(wl->wl_head != wl->wl_tail);
	KASSERT(wl->wl_head != 0);

	return 0;
}

int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = ilog2(blksize);
	int fs_dev_bshift = log_dev_bshift;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
		    ("wapbl: log device's block size cannot be larger "
		     "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return ENOSPC;
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	TAILQ_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of log_dev_bshift */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;
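
	/*
	 * Worked example of the geometry above (illustrative numbers
	 * only): a log of count = 1024 blocks of blksize = 2048 bytes
	 * has log_dev_bshift = 11, so wl_circ_off = 2 << 11 = 4096
	 * bytes and wl_circ_size = 1024*2048 - 4096 = 2093056 bytes,
	 * which is already a multiple of 2048, so the truncation
	 * leaves it unchanged.
	 */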

	/*
	 * wl_bufbytes_max limits the size of the in memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
	 *   it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
	 * Therefore it must be a multiple of the least common multiple of
	 * those three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
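
	/*
	 * Example of the rounding above: with PAGE_SHIFT = 12 and both
	 * bshifts = 11, a starting value of 0x12345 becomes 0x12000,
	 * i.e. it is rounded down to a multiple of the largest of the
	 * three power-of-two alignments (here, PAGE_SIZE).
	 */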

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (buf_nbuf() / 2) * 1024;

	wl->wl_brperjblock = ((1<<wl->wl_log_dev_bshift)
	    - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
	KASSERT(wl->wl_brperjblock > 0);

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = wl->wl_bufbytes_max / mp->mnt_stat.f_bsize / 2;
	TAILQ_INIT(&wl->wl_dealloclist);

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	wapbl_evcnt_init(wl);

	wapbl_dkcache_init(wl);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1 << wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_alloc(len);
	}

	TAILQ_INIT(&wl->wl_iobufs);
	TAILQ_INIT(&wl->wl_iobufs_busy);
	for (int i = 0; i < wapbl_journal_iobufs; i++) {
		struct buf *bp;

		if ((bp = geteblk(MAXPHYS)) == NULL)
			goto errout;

		mutex_enter(&bufcache_lock);
		mutex_enter(devvp->v_interlock);
		bgetvp(devvp, bp);
		mutex_exit(devvp->v_interlock);
		mutex_exit(&bufcache_lock);

		bp->b_dev = devvp->v_rdev;

		TAILQ_INSERT_TAIL(&wl->wl_iobufs, bp, b_wapbllist);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		error = wapbl_start_flush_inodes(wl, wr);
		if (error)
			goto errout;
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	while (!TAILQ_EMPTY(&wl->wl_iobufs)) {
		struct buf *bp;

		bp = TAILQ_FIRST(&wl->wl_iobufs);
		TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
		brelse(bp, BC_INVAL);
	}
	wapbl_inodetrk_free(wl);
	wapbl_free(wl, sizeof(*wl));

	return error;
}
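
#if 0
/*
 * Usage sketch (never compiled): roughly how a file system might
 * attach a log at mount time.  The vnode, geometry and flush
 * callbacks here are hypothetical placeholders.
 */
static int
example_attach_log(struct mount *mp, struct vnode *logvp, daddr_t logstart,
	size_t logblks, size_t blksize, struct wapbl_replay *wr)
{
	struct wapbl *wl;
	int error;

	error = wapbl_start(&wl, mp, logvp, logstart, logblks, blksize,
	    wr, example_flush, example_flush_abort);
	if (error)
		return error;
	mp->mnt_wapbl = wl;
	return 0;
}
#endif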

/*
 * Like wapbl_flush, only discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct wapbl_dealloc *wd;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));

#ifdef WAPBL_DEBUG_PRINT
	{
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = TAILQ_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			pool_put(&wapbl_entry_pool, we);
		}
	}

	/* Discard list of deallocs */
	while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL)
		wapbl_deallocation_free(wl, wd, true);

	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
	KASSERT(wl->wl_dealloccnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return EBUSY;
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_iobufs_busy));

	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	while (!TAILQ_EMPTY(&wl->wl_iobufs)) {
		struct buf *bp;

		bp = TAILQ_FIRST(&wl->wl_iobufs);
		TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
		brelse(bp, BC_INVAL);
	}
	wapbl_inodetrk_free(wl);

	wapbl_evcnt_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl, sizeof(*wl));

	return 0;
}

/****************************************************************/
/*
 * Unbuffered disk I/O
 */

static void
wapbl_doio_accounting(struct vnode *devvp, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
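	/* On NetBSD B_WRITE is zero, so this tests for absence of B_READ. */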
	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}
}

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct buf *bp;
	int error;

	KASSERT(devvp->v_type == VBLK);

	wapbl_doio_accounting(devvp, flags);

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags |= BC_BUSY;	/* mandatory, asserted by biowait() */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;
	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%"PRIx64"\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%"PRIx64" failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

/*
 * wapbl_write(data, len, devvp, pbn)
 *
 *	Synchronously write len bytes from data to physical block pbn
 *	on devvp.
 */
int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

/*
 * wapbl_read(data, len, devvp, pbn)
 *
 *	Synchronously read len bytes into data from physical block pbn
 *	on devvp.
 */
int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}

/****************************************************************/
/*
 * Buffered disk writes -- try to coalesce writes and emit
 * MAXPHYS-aligned blocks.
 */

/*
 * wapbl_buffered_write_async(wl, bp)
 *
 *	Send buffer for asynchronous write.
 */
static void
wapbl_buffered_write_async(struct wapbl *wl, struct buf *bp)
{
	wapbl_doio_accounting(wl->wl_devvp, bp->b_flags);

	KASSERT(TAILQ_FIRST(&wl->wl_iobufs) == bp);
	TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);

	bp->b_flags |= B_WRITE;
	bp->b_cflags |= BC_BUSY;	/* mandatory, asserted by biowait() */
	bp->b_oflags = 0;
	bp->b_bcount = bp->b_resid;
	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);

	VOP_STRATEGY(wl->wl_devvp, bp);

	wl->wl_ev_journalwrite.ev_count++;

	TAILQ_INSERT_TAIL(&wl->wl_iobufs_busy, bp, b_wapbllist);
}

/*
 * wapbl_buffered_flush(wl)
 *
 *	Flush any buffered writes from wapbl_buffered_write.
 */
static int
wapbl_buffered_flush(struct wapbl *wl, bool full)
{
	int error = 0;
	struct buf *bp, *bnext;
	bool only_done = true, found = false;

	/* if there is outstanding buffered write, send it now */
	if ((bp = TAILQ_FIRST(&wl->wl_iobufs)) && bp->b_resid > 0)
		wapbl_buffered_write_async(wl, bp);

	/* wait for I/O to complete */
again:
	TAILQ_FOREACH_SAFE(bp, &wl->wl_iobufs_busy, b_wapbllist, bnext) {
		if (!full && only_done) {
			/* skip unfinished */
			if (!ISSET(bp->b_oflags, BO_DONE))
				continue;
		}

		if (ISSET(bp->b_oflags, BO_DONE))
			wl->wl_ev_jbufs_bio_nowait.ev_count++;

		TAILQ_REMOVE(&wl->wl_iobufs_busy, bp, b_wapbllist);
		error = biowait(bp);

		/* reset for reuse */
		bp->b_blkno = bp->b_resid = bp->b_flags = 0;
		TAILQ_INSERT_TAIL(&wl->wl_iobufs, bp, b_wapbllist);
		found = true;

		if (!full)
			break;
	}

	if (!found && only_done && !TAILQ_EMPTY(&wl->wl_iobufs_busy)) {
		only_done = false;
		goto again;
	}

	return error;
}

/*
 * wapbl_buffered_write(data, len, wl, pbn)
 *
 *	Write len bytes from data to physical block pbn on
 *	wl->wl_devvp.  The write may not complete until
 *	wapbl_buffered_flush.
 */
static int
wapbl_buffered_write(void *data, size_t len, struct wapbl *wl, daddr_t pbn,
	int bflags)
{
	size_t resid;
	struct buf *bp;

again:
	bp = TAILQ_FIRST(&wl->wl_iobufs);

	if (bp == NULL) {
		/* No more buffers, wait for any previous I/O to finish. */
		wapbl_buffered_flush(wl, false);

		bp = TAILQ_FIRST(&wl->wl_iobufs);
		KASSERT(bp != NULL);
	}

	/*
	 * If not adjacent to buffered data flush first.  Disk block
	 * address is always valid for non-empty buffer.
	 */
	if ((bp->b_resid > 0 && pbn != bp->b_blkno + btodb(bp->b_resid))) {
		wapbl_buffered_write_async(wl, bp);
		goto again;
	}

	/*
	 * If this write goes to an empty buffer we have to
	 * save the disk block address first.
	 */
	if (bp->b_blkno == 0) {
		bp->b_blkno = pbn;
		bp->b_flags |= bflags;
	}

	/*
	 * Remaining space so this buffer ends on a buffer size boundary.
	 *
	 * Cannot become less than or equal to zero, as the buffer would
	 * have been flushed on the previous call in that case.
	 */
	resid = bp->b_bufsize - dbtob(bp->b_blkno % btodb(bp->b_bufsize)) -
	    bp->b_resid;
	KASSERT(resid > 0);
	KASSERT(dbtob(btodb(resid)) == resid);

	if (len < resid)
		resid = len;

	memcpy((uint8_t *)bp->b_data + bp->b_resid, data, resid);
	bp->b_resid += resid;

	if (len >= resid) {
		/* Just filled the buf, or data did not fit */
		wapbl_buffered_write_async(wl, bp);

		data = (uint8_t *)data + resid;
		len -= resid;
		pbn += btodb(resid);

		if (len > 0)
			goto again;
	}

	return 0;
}
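
/*
 * Worked example of the coalescing above (illustrative numbers,
 * assuming DEV_BSIZE = 512 and MAXPHYS = 64 KiB): three consecutive
 * 16 KiB calls for pbn 1024, 1056 and 1088 each land at the end of
 * the same iobuf, since 1024 + btodb(16384) = 1056 and
 * 1056 + btodb(16384) = 1088.  Only a non-adjacent pbn (or a full
 * buffer) triggers wapbl_buffered_write_async() on the accumulated
 * 48 KiB.
 */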

/*
 * wapbl_circ_write(wl, data, len, offp)
 *
 *	Write len bytes from data to the circular queue of wl, starting
 *	at linear byte offset *offp, and returning the new linear byte
 *	offset in *offp.
 *
 *	If the starting linear byte offset precedes wl->wl_circ_off,
 *	the write instead begins at wl->wl_circ_off.  XXX WTF?  This
 *	should be a KASSERT, not a conditional.
 *
 *	The write is buffered in wl and must be flushed with
 *	wapbl_buffered_flush before it will be submitted to the disk.
 */
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;
	daddr_t pbn;

	KDASSERT(((len >> wl->wl_log_dev_bshift) <<
	    wl->wl_log_dev_bshift) == len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
#ifdef _KERNEL
		pbn = btodb(pbn << wl->wl_log_dev_bshift);
#endif
		error = wapbl_buffered_write(data, slen, wl, pbn,
		    WAPBL_JDATA_FLAGS(wl));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
#ifdef _KERNEL
	pbn = btodb(pbn << wl->wl_log_dev_bshift);
#endif
	error = wapbl_buffered_write(data, len, wl, pbn,
	    WAPBL_JDATA_FLAGS(wl));
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}
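
/*
 * Wraparound example (illustrative numbers): with wl_circ_off = 1024
 * and wl_circ_size = 8192, valid offsets run from 1024 up to 9216.
 * A 1024-byte record starting at off = 8704 is split: slen =
 * 1024 + 8192 - 8704 = 512 bytes are written at 8704, and the
 * remaining 512 bytes continue at the start of the circle, off 1024.
 */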

/****************************************************************/
/*
 * WAPBL transactions: entering, adding/removing bufs, and exiting
 */

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;

	KDASSERT(wl);

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
	    wl->wl_bufbytes_max / 2) ||
	    ((wl->wl_bufcount + (lockcount * 10)) >
	    wl->wl_bufcount_max / 2) ||
	    (wapbl_transaction_len(wl) > wl->wl_circ_size / 2) ||
	    (wl->wl_dealloccnt >= (wl->wl_dealloclim / 2));
	mutex_exit(&wl->wl_mtx);
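
	/*
	 * Example of the heuristic above (illustrative numbers): with
	 * wl_bufbytes_max = 16 MiB, MAXPHYS = 64 KiB and three
	 * transactions in progress, a flush is forced as soon as
	 * wl_bufbytes + 3*64 KiB exceeds 8 MiB, before this thread
	 * takes the transaction lock below.
	 */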

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu) "
		    "dealloccnt %d (lim=%d)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max,
		    wl->wl_dealloccnt, wl->wl_dealloclim));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, RW_READER);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	/*
	 * XXX this could be handled more gracefully, perhaps place
	 * only a partial transaction in the log and allow the
	 * remaining to flush without the protection of the journal.
	 */
	KASSERTMSG((wapbl_transaction_len(wl) <=
	    (wl->wl_circ_size - wl->wl_reserved_bytes)),
	    "wapbl_end: current transaction too big to flush");

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}

void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		TAILQ_REMOVE(&wl->wl_bufs, bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked but dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	TAILQ_INSERT_TAIL(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	    ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	TAILQ_REMOVE(&wl->wl_bufs, bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */

/****************************************************************/
/* Some utility inlines */

/*
 * wapbl_space_used(avail, head, tail)
 *
 *	Number of bytes used in a circular queue of avail total bytes,
 *	from tail to head.
 */
static inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail == 0) {
		KASSERT(head == 0);
		return 0;
	}
	return ((head + (avail - 1) - tail) % avail) + 1;
}
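
/*
 * Example: avail = 100, tail = 90, head = 10 gives
 * ((10 + 99 - 90) % 100) + 1 = 20 bytes used (the queue wraps).
 * The full case head == tail != 0 gives
 * ((head + 99 - head) % 100) + 1 = 100 = avail, as expected.
 */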

#ifdef _KERNEL
/*
 * wapbl_advance(size, off, oldoff, delta)
 *
 *	Given a byte offset oldoff into a circular queue of size bytes
 *	starting at off, return a new byte offset oldoff + delta into
 *	the circular queue.
 */
static inline off_t
wapbl_advance(size_t size, size_t off, off_t oldoff, size_t delta)
{
	off_t newoff;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= (size_t)size);
	KASSERT((oldoff == 0) || ((size_t)oldoff >= off));
	KASSERT(oldoff < (off_t)(size + off));

	if ((oldoff == 0) && (delta != 0))
		newoff = off + delta;
	else if ((oldoff + delta) < (size + off))
		newoff = oldoff + delta;
	else
		newoff = (oldoff + delta) - size;

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (newoff == oldoff));
	KASSERT((delta == 0) || (newoff != 0));
	KASSERT((delta != (size)) || (newoff == oldoff));

	/* Define acceptable ranges for output. */
	KASSERT((newoff == 0) || ((size_t)newoff >= off));
	KASSERT((size_t)newoff < (size + off));
	return newoff;
}
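
/*
 * Example: for a queue of size 100 starting at off 16 (valid offsets
 * 16..115), advancing oldoff 110 by 10 wraps to (110 + 10) - 100 = 20,
 * while advancing the empty sentinel oldoff 0 by 10 yields
 * off + 10 = 26.
 */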

/*
 * wapbl_space_free(avail, head, tail)
 *
 *	Number of bytes free in a circular queue of avail total bytes,
 *	in which everything from tail to head is used.
 */
static inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{

	return avail - wapbl_space_used(avail, head, tail);
}

/*
 * wapbl_advance_head(size, off, delta, headp, tailp)
 *
 *	In a circular queue of size bytes starting at off, given the
 *	old head and tail offsets *headp and *tailp, store the new head
 *	and tail offsets in *headp and *tailp resulting from adding
 *	delta bytes of data to the head.
 */
static inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
	off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_free(size, head, tail));
	head = wapbl_advance(size, off, head, delta);
	if ((tail == 0) && (head != 0))
		tail = off;
	*headp = head;
	*tailp = tail;
}

/*
 * wapbl_advance_tail(size, off, delta, headp, tailp)
 *
 *	In a circular queue of size bytes starting at off, given the
 *	old head and tail offsets *headp and *tailp, store the new head
 *	and tail offsets in *headp and *tailp resulting from removing
 *	delta bytes of data from the tail.
 */
static inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
	off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_used(size, head, tail));
	tail = wapbl_advance(size, off, tail, delta);
	if (head == tail) {
		head = tail = 0;
	}
	*headp = head;
	*tailp = tail;
}
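
/*
 * Example of the empty/nonempty transitions: from the empty state
 * (head = tail = 0), wapbl_advance_head(100, 16, 30, ...) sets
 * head = 46 and pulls tail onto the circle at off = 16.  A later
 * wapbl_advance_tail(100, 16, 30, ...) moves tail to 46 == head,
 * so both are reset to 0 and the queue is empty again.
 */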

/****************************************************************/

/*
 * wapbl_truncate(wl, minfree)
 *
 *	Wait until at least minfree bytes are available in the log.
 *
 *	If it was necessary to wait for writes to complete,
 *	advance the circular queue tail to reflect the new write
 *	completions and issue a write commit to the log.
 *
 *	=> Caller must hold wl->wl_rwlock writer lock.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
	    wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush.  This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}

/****************************************************************/

void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;
#ifdef WAPBL_DEBUG_BUFBYTES
	const int bufsize = bp->b_bufsize;
#endif

	/*
	 * Handle possible flushing of buffers after log has been
	 * decommissioned.
	 */
	if (!wl) {
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bufsize);
		we->we_unsynced_bufbytes -= bufsize;
#endif

		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			pool_put(&wapbl_entry_pool, we);
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_oflags & BO_DONE);
	KDASSERT(!(bp->b_oflags & BO_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_cflags & BC_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_cflags & BC_INVAL));
	KDASSERT(!(bp->b_cflags & BC_NOCACHE));
#endif

	if (bp->b_error) {
		/*
		 * If an error occurs, it would be nice to leave the buffer
		 * as a delayed write on the LRU queue so that we can retry
		 * it later.  But buffercache(9) can't handle dirty buffer
		 * reuse, so just mark the log permanently errored out.
		 */
		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
	}

	/*
	 * Make sure that the buf doesn't retain the media flags, so that
	 * e.g. wapbl_allow_dpofua has immediate effect on any following I/O.
	 * The flags will be set again if needed by another I/O.
	 */
	bp->b_flags &= ~B_MEDIA_FLAGS;

	/*
	 * Release the buffer here.  wapbl_flush() may wait for the
	 * log to become empty and we better unbusy the buffer before
	 * wapbl_flush() returns.
	 */
	brelse(bp, 0);

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bufsize);
	we->we_unsynced_bufbytes -= bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bufsize);
	wl->wl_unsynced_bufbytes -= bufsize;
#endif
	wl->wl_ev_metawrite.ev_count++;

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions.  If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has been successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		    (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			pool_put(&wapbl_entry_pool, we);
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
}
1731
1732 /*
1733 * wapbl_flush(wl, wait)
1734 *
1735 * Flush pending block writes, deallocations, and inodes from
1736 * the current transaction in memory to the log on disk:
1737 *
1738 * 1. Call the file system's wl_flush callback to flush any
1739 * per-file-system pending updates.
1740 * 2. Wait for enough space in the log for the current transaction.
1741 * 3. Synchronously write the new log records, advancing the
1742 * circular queue head.
1743 * 4. Issue the pending block writes asynchronously, now that they
1744 * are recorded in the log and can be replayed after crash.
1745 * 5. If wait is true, wait for all writes to complete and for the
1746 * log to become empty.
1747 *
1748 * On failure, call the file system's wl_flush_abort callback.
1749 */
1750 int
1751 wapbl_flush(struct wapbl *wl, int waitfor)
1752 {
1753 struct buf *bp;
1754 struct wapbl_entry *we;
1755 off_t off;
1756 off_t head;
1757 off_t tail;
1758 size_t delta = 0;
1759 size_t flushsize;
1760 size_t reserved;
1761 int error = 0;
1762
1763 /*
1764 * Do a quick check to see if a full flush can be skipped
1765 * This assumes that the flush callback does not need to be called
1766 * unless there are other outstanding bufs.
1767 */
1768 if (!waitfor) {
1769 size_t nbufs;
1770 mutex_enter(&wl->wl_mtx); /* XXX need mutex here to
1771 protect the KASSERTS */
1772 nbufs = wl->wl_bufcount;
1773 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1774 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1775 mutex_exit(&wl->wl_mtx);
1776 if (nbufs == 0)
1777 return 0;
1778 }
1779
1780 /*
1781 * XXX we may consider using LK_UPGRADE here
1782 * if we want to call flush from inside a transaction
1783 */
1784 rw_enter(&wl->wl_rwlock, RW_WRITER);
1785 wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));
1786
1787 /*
1788 * Now that we are exclusively locked and the file system has
1789 * issued any deferred block writes for this transaction, check
1790 * whether there are any blocks to write to the log. If not,
1791 * skip waiting for space or writing any log entries.
1792 *
1793 * XXX Shouldn't this also check wl_dealloccnt and
1794 * wl_inohashcnt? Perhaps wl_dealloccnt doesn't matter if the
1795 * file system didn't produce any blocks as a consequence of
1796 * it, but the same does not seem to be so of wl_inohashcnt.
1797 */
1798 if (wl->wl_bufcount == 0) {
1799 goto wait_out;
1800 }
1801
1802 #if 0
1803 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1804 ("wapbl_flush thread %d.%d flushing entries with "
1805 "bufcount=%zu bufbytes=%zu\n",
1806 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1807 wl->wl_bufbytes));
1808 #endif
1809
1810 /* Calculate amount of space needed to flush */
1811 flushsize = wapbl_transaction_len(wl);
1812 if (wapbl_verbose_commit) {
1813 struct timespec ts;
1814 getnanotime(&ts);
1815 printf("%s: %lld.%09ld this transaction = %zu bytes\n",
1816 __func__, (long long)ts.tv_sec,
1817 (long)ts.tv_nsec, flushsize);
1818 }
1819
1820 if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1821 /*
1822 * XXX this could be handled more gracefully, perhaps place
1823 * only a partial transaction in the log and allow the
1824 * remaining to flush without the protection of the journal.
1825 */
1826 panic("wapbl_flush: current transaction too big to flush");
1827 }
1828
1829 error = wapbl_truncate(wl, flushsize);
1830 if (error)
1831 goto out;
1832
1833 off = wl->wl_head;
1834 KASSERT((off == 0) || (off >= wl->wl_circ_off));
1835 KASSERT((off == 0) || (off < wl->wl_circ_off + wl->wl_circ_size));
1836 error = wapbl_write_blocks(wl, &off);
1837 if (error)
1838 goto out;
1839 error = wapbl_write_revocations(wl, &off);
1840 if (error)
1841 goto out;
1842 error = wapbl_write_inodes(wl, &off);
1843 if (error)
1844 goto out;
1845
1846 reserved = 0;
1847 if (wl->wl_inohashcnt)
1848 reserved = wapbl_transaction_inodes_len(wl);
1849
1850 head = wl->wl_head;
1851 tail = wl->wl_tail;
1852
1853 wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1854 &head, &tail);
1855
1856 KASSERTMSG(head == off,
1857 "lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1858 " off=%"PRIdMAX" flush=%zu",
1859 (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1860 flushsize);
1861
1862 /* Opportunistically move the tail forward if we can */
1863 mutex_enter(&wl->wl_mtx);
1864 delta = wl->wl_reclaimable_bytes;
1865 mutex_exit(&wl->wl_mtx);
1866 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1867 &head, &tail);
1868
1869 error = wapbl_write_commit(wl, head, tail);
1870 if (error)
1871 goto out;
1872
1873 we = pool_get(&wapbl_entry_pool, PR_WAITOK);
1874
1875 #ifdef WAPBL_DEBUG_BUFBYTES
1876 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1877 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1878 " unsynced=%zu"
1879 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1880 "inodes=%d\n",
1881 curproc->p_pid, curlwp->l_lid, flushsize, delta,
1882 wapbl_space_used(wl->wl_circ_size, head, tail),
1883 wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1884 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1885 wl->wl_inohashcnt));
1886 #else
1887 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1888 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1889 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1890 "inodes=%d\n",
1891 curproc->p_pid, curlwp->l_lid, flushsize, delta,
1892 wapbl_space_used(wl->wl_circ_size, head, tail),
1893 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1894 wl->wl_dealloccnt, wl->wl_inohashcnt));
1895 #endif
1896
1897
1898 mutex_enter(&bufcache_lock);
1899 mutex_enter(&wl->wl_mtx);
1900
1901 wl->wl_reserved_bytes = reserved;
1902 wl->wl_head = head;
1903 wl->wl_tail = tail;
1904 KASSERT(wl->wl_reclaimable_bytes >= delta);
1905 wl->wl_reclaimable_bytes -= delta;
1906 KDASSERT(wl->wl_dealloccnt == 0);
1907 #ifdef WAPBL_DEBUG_BUFBYTES
1908 wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1909 #endif
1910
1911 we->we_wapbl = wl;
1912 we->we_bufcount = wl->wl_bufcount;
1913 #ifdef WAPBL_DEBUG_BUFBYTES
1914 we->we_unsynced_bufbytes = wl->wl_bufbytes;
1915 #endif
1916 we->we_reclaimable_bytes = flushsize;
1917 we->we_error = 0;
1918 SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1919
1920 /*
1921 	 * This flushes bufs in the order they were queued, so the LRU
1922 * order is preserved.
1923 */
1924 while ((bp = TAILQ_FIRST(&wl->wl_bufs)) != NULL) {
1925 if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1926 continue;
1927 }
1928 bp->b_iodone = wapbl_biodone;
1929 bp->b_private = we;
1930
1931 bremfree(bp);
1932 wapbl_remove_buf_locked(wl, bp);
1933 mutex_exit(&wl->wl_mtx);
1934 mutex_exit(&bufcache_lock);
1935 bawrite(bp);
1936 mutex_enter(&bufcache_lock);
1937 mutex_enter(&wl->wl_mtx);
1938 }
1939 mutex_exit(&wl->wl_mtx);
1940 mutex_exit(&bufcache_lock);
1941
1942 #if 0
1943 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1944 ("wapbl_flush thread %d.%d done flushing entries...\n",
1945 curproc->p_pid, curlwp->l_lid));
1946 #endif
1947
1948 wait_out:
1949
1950 /*
1951 * If the waitfor flag is set, don't return until everything is
1952 * fully flushed and the on disk log is empty.
1953 */
1954 if (waitfor) {
1955 error = wapbl_truncate(wl, wl->wl_circ_size -
1956 wl->wl_reserved_bytes);
1957 }
1958
1959 out:
1960 if (error) {
1961 wl->wl_flush_abort(wl->wl_mount,
1962 TAILQ_FIRST(&wl->wl_dealloclist));
1963 }
1964
1965 #ifdef WAPBL_DEBUG_PRINT
1966 if (error) {
1967 pid_t pid = -1;
1968 lwpid_t lid = -1;
1969 if (curproc)
1970 pid = curproc->p_pid;
1971 if (curlwp)
1972 lid = curlwp->l_lid;
1973 mutex_enter(&wl->wl_mtx);
1974 #ifdef WAPBL_DEBUG_BUFBYTES
1975 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1976 ("wapbl_flush: thread %d.%d aborted flush: "
1977 "error = %d\n"
1978 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1979 "deallocs=%d inodes=%d\n"
1980 "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1981 "unsynced=%zu\n",
1982 pid, lid, error, wl->wl_bufcount,
1983 wl->wl_bufbytes, wl->wl_bcount,
1984 wl->wl_dealloccnt, wl->wl_inohashcnt,
1985 wl->wl_error_count, wl->wl_reclaimable_bytes,
1986 wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1987 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1988 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1989 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1990 "error = %d, unsynced = %zu\n",
1991 we->we_bufcount, we->we_reclaimable_bytes,
1992 we->we_error, we->we_unsynced_bufbytes));
1993 }
1994 #else
1995 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1996 ("wapbl_flush: thread %d.%d aborted flush: "
1997 "error = %d\n"
1998 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1999 "deallocs=%d inodes=%d\n"
2000 "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
2001 pid, lid, error, wl->wl_bufcount,
2002 wl->wl_bufbytes, wl->wl_bcount,
2003 wl->wl_dealloccnt, wl->wl_inohashcnt,
2004 wl->wl_error_count, wl->wl_reclaimable_bytes,
2005 wl->wl_reserved_bytes));
2006 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
2007 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2008 ("\tentry: bufcount = %zu, reclaimable = %zu, "
2009 "error = %d\n", we->we_bufcount,
2010 we->we_reclaimable_bytes, we->we_error));
2011 }
2012 #endif
2013 mutex_exit(&wl->wl_mtx);
2014 }
2015 #endif
2016
2017 rw_exit(&wl->wl_rwlock);
2018 return error;
2019 }
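
/*
 * To summarize the ordering established above: the block, revocation
 * and inode records are first written into the circular log, then
 * wapbl_write_commit makes them durable (cache sync, commit header,
 * cache sync or FUA), and only after that are the buffers handed to
 * bawrite() to reach their home locations in the file system.
 */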
2020
2021 /****************************************************************/
2022
2023 void
2024 wapbl_jlock_assert(struct wapbl *wl)
2025 {
2026
2027 KASSERT(rw_lock_held(&wl->wl_rwlock));
2028 }
2029
2030 void
2031 wapbl_junlock_assert(struct wapbl *wl)
2032 {
2033
2034 KASSERT(!rw_write_held(&wl->wl_rwlock));
2035 }
2036
2037 /****************************************************************/
2038
2039 /* locks missing */
2040 void
2041 wapbl_print(struct wapbl *wl,
2042 int full,
2043 void (*pr)(const char *, ...))
2044 {
2045 struct buf *bp;
2046 struct wapbl_entry *we;
2047 (*pr)("wapbl %p", wl);
2048 (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
2049 wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
2050 (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
2051 wl->wl_circ_size, wl->wl_circ_off,
2052 (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
2053 (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
2054 wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
2055 #ifdef WAPBL_DEBUG_BUFBYTES
2056 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
2057 "reserved = %zu errcnt = %d unsynced = %zu\n",
2058 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
2059 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
2060 wl->wl_error_count, wl->wl_unsynced_bufbytes);
2061 #else
2062 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
2063 "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
2064 wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
2065 wl->wl_error_count);
2066 #endif
2067 (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
2068 wl->wl_dealloccnt, wl->wl_dealloclim);
2069 (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
2070 wl->wl_inohashcnt, wl->wl_inohashmask);
2071 (*pr)("entries:\n");
2072 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
2073 #ifdef WAPBL_DEBUG_BUFBYTES
2074 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
2075 "unsynced = %zu\n",
2076 we->we_bufcount, we->we_reclaimable_bytes,
2077 we->we_error, we->we_unsynced_bufbytes);
2078 #else
2079 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
2080 we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
2081 #endif
2082 }
2083 if (full) {
2084 int cnt = 0;
2085 (*pr)("bufs =");
2086 TAILQ_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
2087 if (!TAILQ_NEXT(bp, b_wapbllist)) {
2088 (*pr)(" %p", bp);
2089 } else if ((++cnt % 6) == 0) {
2090 (*pr)(" %p,\n\t", bp);
2091 } else {
2092 (*pr)(" %p,", bp);
2093 }
2094 }
2095 (*pr)("\n");
2096
2097 (*pr)("dealloced blks = ");
2098 {
2099 struct wapbl_dealloc *wd;
2100 cnt = 0;
2101 TAILQ_FOREACH(wd, &wl->wl_dealloclist, wd_entries) {
2102 (*pr)(" %"PRId64":%d,",
2103 wd->wd_blkno,
2104 wd->wd_len);
2105 if ((++cnt % 4) == 0) {
2106 (*pr)("\n\t");
2107 }
2108 }
2109 }
2110 (*pr)("\n");
2111
2112 (*pr)("registered inodes = ");
2113 {
2114 int i;
2115 cnt = 0;
2116 for (i = 0; i <= wl->wl_inohashmask; i++) {
2117 struct wapbl_ino_head *wih;
2118 struct wapbl_ino *wi;
2119
2120 wih = &wl->wl_inohash[i];
2121 LIST_FOREACH(wi, wih, wi_hash) {
2122 if (wi->wi_ino == 0)
2123 continue;
2124 (*pr)(" %"PRIu64"/0%06"PRIo32",",
2125 wi->wi_ino, wi->wi_mode);
2126 if ((++cnt % 4) == 0) {
2127 (*pr)("\n\t");
2128 }
2129 }
2130 }
2131 (*pr)("\n");
2132 }
2133
2134 (*pr)("iobufs free =");
2135 TAILQ_FOREACH(bp, &wl->wl_iobufs, b_wapbllist) {
2136 if (!TAILQ_NEXT(bp, b_wapbllist)) {
2137 (*pr)(" %p", bp);
2138 } else if ((++cnt % 6) == 0) {
2139 (*pr)(" %p,\n\t", bp);
2140 } else {
2141 (*pr)(" %p,", bp);
2142 }
2143 }
2144 (*pr)("\n");
2145
2146 (*pr)("iobufs busy =");
2147 TAILQ_FOREACH(bp, &wl->wl_iobufs_busy, b_wapbllist) {
2148 if (!TAILQ_NEXT(bp, b_wapbllist)) {
2149 (*pr)(" %p", bp);
2150 } else if ((++cnt % 6) == 0) {
2151 (*pr)(" %p,\n\t", bp);
2152 } else {
2153 (*pr)(" %p,", bp);
2154 }
2155 }
2156 (*pr)("\n");
2157 }
2158 }
2159
2160 #if defined(WAPBL_DEBUG) || defined(DDB)
2161 void
2162 wapbl_dump(struct wapbl *wl)
2163 {
2164 #if defined(WAPBL_DEBUG)
2165 if (!wl)
2166 wl = wapbl_debug_wl;
2167 #endif
2168 if (!wl)
2169 return;
2170 wapbl_print(wl, 1, printf);
2171 }
2172 #endif
2173
2174 /****************************************************************/
2175
2176 int
2177 wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len, bool force,
2178 void **cookiep)
2179 {
2180 struct wapbl_dealloc *wd;
2181 int error = 0;
2182
2183 wapbl_jlock_assert(wl);
2184
2185 mutex_enter(&wl->wl_mtx);
2186
2187 if (__predict_false(wl->wl_dealloccnt >= wl->wl_dealloclim)) {
2188 if (!force) {
2189 error = EAGAIN;
2190 goto out;
2191 }
2192
2193 /*
2194 * Forced registration can only be used when:
2195 * 1) the caller can't cope with failure
2196 		 * 2) the path can be triggered only a bounded, small
2197 		 *    number of times per transaction
2198 		 * If these conditions are not fulfilled and the path is
2199 		 * triggered many times, the maximum transaction size could
2200 		 * be exceeded, causing a panic later.
2201 */
2202 printf("%s: forced dealloc registration over limit: %d >= %d\n",
2203 wl->wl_mount->mnt_stat.f_mntonname,
2204 wl->wl_dealloccnt, wl->wl_dealloclim);
2205 }
2206
2207 wl->wl_dealloccnt++;
2208 mutex_exit(&wl->wl_mtx);
2209
2210 wd = pool_get(&wapbl_dealloc_pool, PR_WAITOK);
2211 wd->wd_blkno = blk;
2212 wd->wd_len = len;
2213
2214 mutex_enter(&wl->wl_mtx);
2215 TAILQ_INSERT_TAIL(&wl->wl_dealloclist, wd, wd_entries);
2216
2217 if (cookiep)
2218 *cookiep = wd;
2219
2220 out:
2221 mutex_exit(&wl->wl_mtx);
2222
2223 WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
2224 ("wapbl_register_deallocation: blk=%"PRId64" len=%d error=%d\n",
2225 blk, len, error));
2226
2227 return error;
2228 }
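
/*
 * An illustrative caller pattern (a sketch only; names such as
 * "blkno" and "aborting" are hypothetical, not from any particular
 * file system): register the deallocation up front, and use the
 * cookie to withdraw it if the surrounding operation is abandoned.
 *
 *	void *cookie;
 *	int error = wapbl_register_deallocation(wl, blkno, len,
 *	    false, &cookie);
 *	if (error == EAGAIN) {
 *		// too many deallocs queued; flush the log and retry
 *	}
 *	...
 *	if (aborting)
 *		wapbl_unregister_deallocation(wl, cookie);
 */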
2229
2230 static void
2231 wapbl_deallocation_free(struct wapbl *wl, struct wapbl_dealloc *wd,
2232 bool locked)
2233 {
2234 KASSERT(!locked
2235 || rw_lock_held(&wl->wl_rwlock) || mutex_owned(&wl->wl_mtx));
2236
2237 if (!locked)
2238 mutex_enter(&wl->wl_mtx);
2239
2240 TAILQ_REMOVE(&wl->wl_dealloclist, wd, wd_entries);
2241 wl->wl_dealloccnt--;
2242
2243 if (!locked)
2244 mutex_exit(&wl->wl_mtx);
2245
2246 pool_put(&wapbl_dealloc_pool, wd);
2247 }
2248
2249 void
2250 wapbl_unregister_deallocation(struct wapbl *wl, void *cookie)
2251 {
2252 KASSERT(cookie != NULL);
2253 wapbl_deallocation_free(wl, cookie, false);
2254 }
2255
2256 /****************************************************************/
2257
2258 static void
2259 wapbl_inodetrk_init(struct wapbl *wl, u_int size)
2260 {
2261
2262 wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
2263 if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
2264 pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
2265 "wapblinopl", &pool_allocator_nointr, IPL_NONE);
2266 }
2267 }
2268
2269 static void
2270 wapbl_inodetrk_free(struct wapbl *wl)
2271 {
2272
2273 /* XXX this KASSERT needs locking/mutex analysis */
2274 KASSERT(wl->wl_inohashcnt == 0);
2275 hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
2276 if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
2277 pool_destroy(&wapbl_ino_pool);
2278 }
2279 }
2280
2281 static struct wapbl_ino *
2282 wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
2283 {
2284 struct wapbl_ino_head *wih;
2285 struct wapbl_ino *wi;
2286
2287 KASSERT(mutex_owned(&wl->wl_mtx));
2288
2289 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2290 LIST_FOREACH(wi, wih, wi_hash) {
2291 if (ino == wi->wi_ino)
2292 return wi;
2293 }
2294 return 0;
2295 }
2296
2297 void
2298 wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2299 {
2300 struct wapbl_ino_head *wih;
2301 struct wapbl_ino *wi;
2302
2303 wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
2304
2305 mutex_enter(&wl->wl_mtx);
2306 if (wapbl_inodetrk_get(wl, ino) == NULL) {
2307 wi->wi_ino = ino;
2308 wi->wi_mode = mode;
2309 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2310 LIST_INSERT_HEAD(wih, wi, wi_hash);
2311 wl->wl_inohashcnt++;
2312 WAPBL_PRINTF(WAPBL_PRINT_INODE,
2313 ("wapbl_register_inode: ino=%"PRId64"\n", ino));
2314 mutex_exit(&wl->wl_mtx);
2315 } else {
2316 mutex_exit(&wl->wl_mtx);
2317 pool_put(&wapbl_ino_pool, wi);
2318 }
2319 }
2320
2321 void
2322 wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2323 {
2324 struct wapbl_ino *wi;
2325
2326 mutex_enter(&wl->wl_mtx);
2327 wi = wapbl_inodetrk_get(wl, ino);
2328 if (wi) {
2329 WAPBL_PRINTF(WAPBL_PRINT_INODE,
2330 ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
2331 KASSERT(wl->wl_inohashcnt > 0);
2332 wl->wl_inohashcnt--;
2333 LIST_REMOVE(wi, wi_hash);
2334 mutex_exit(&wl->wl_mtx);
2335
2336 pool_put(&wapbl_ino_pool, wi);
2337 } else {
2338 mutex_exit(&wl->wl_mtx);
2339 }
2340 }
2341
2342 /****************************************************************/
2343
2344 /*
2345 * wapbl_transaction_inodes_len(wl)
2346 *
2347 * Calculate the number of bytes required for inode registration
2348 * log records in wl.
2349 */
2350 static inline size_t
2351 wapbl_transaction_inodes_len(struct wapbl *wl)
2352 {
2353 int blocklen = 1<<wl->wl_log_dev_bshift;
2354 int iph;
2355
2356 	/* Calculate the number of inodes described in an inodelist header */
2357 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2358 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2359
2360 KASSERT(iph > 0);
2361
2362 return MAX(1, howmany(wl->wl_inohashcnt, iph)) * blocklen;
2363 }
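
/*
 * Worked example (sizes illustrative only): with a 2048-byte log
 * block, a 16-byte wapbl_wc_inodelist header and 16-byte inode
 * records, iph = (2048 - 16) / 16 = 127, so registering 300 inodes
 * costs howmany(300, 127) = 3 header blocks, i.e. 6144 bytes.  Even
 * zero inodes cost one block, because of the MAX(1, ...).
 */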
2364
2365
2366 /*
2367 * wapbl_transaction_len(wl)
2368 *
2369 * Calculate number of bytes required for all log records in wl.
2370 */
2371 static size_t
2372 wapbl_transaction_len(struct wapbl *wl)
2373 {
2374 int blocklen = 1<<wl->wl_log_dev_bshift;
2375 size_t len;
2376
2377 /* Calculate number of blocks described in a blocklist header */
2378 len = wl->wl_bcount;
2379 len += howmany(wl->wl_bufcount, wl->wl_brperjblock) * blocklen;
2380 len += howmany(wl->wl_dealloccnt, wl->wl_brperjblock) * blocklen;
2381 len += wapbl_transaction_inodes_len(wl);
2382
2383 return len;
2384 }
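
/*
 * Worked example (numbers illustrative only): with blocklen = 2048
 * and wl_brperjblock = 127, a transaction of 300 buffers totalling
 * wl_bcount = 600 KiB with 10 deallocations comes to 600 KiB of
 * data, plus howmany(300, 127) = 3 blocklist header blocks, plus
 * howmany(10, 127) = 1 revocation block, plus the inodelist bytes
 * from wapbl_transaction_inodes_len().
 */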
2385
2386 /*
2387 * wapbl_cache_sync(wl, msg)
2388 *
2389 * Issue DIOCCACHESYNC to wl->wl_devvp.
2390 *
2391 * If sysctl(vfs.wapbl.verbose_commit) >= 2, print a message
2392 * including msg about the duration of the cache sync.
2393 */
2394 static int
2395 wapbl_cache_sync(struct wapbl *wl, const char *msg)
2396 {
2397 const bool verbose = wapbl_verbose_commit >= 2;
2398 struct bintime start_time;
2399 int force = 1;
2400 int error;
2401
2402 /* Skip full cache sync if disabled */
2403 if (!wapbl_flush_disk_cache) {
2404 return 0;
2405 }
2406 if (verbose) {
2407 bintime(&start_time);
2408 }
2409 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force,
2410 FWRITE, FSCRED);
2411 if (error) {
2412 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2413 ("wapbl_cache_sync: DIOCCACHESYNC on dev 0x%jx "
2414 "returned %d\n", (uintmax_t)wl->wl_devvp->v_rdev, error));
2415 }
2416 if (verbose) {
2417 struct bintime d;
2418 struct timespec ts;
2419
2420 bintime(&d);
2421 bintime_sub(&d, &start_time);
2422 bintime2timespec(&d, &ts);
2423 printf("wapbl_cache_sync: %s: dev 0x%jx %ju.%09lu\n",
2424 msg, (uintmax_t)wl->wl_devvp->v_rdev,
2425 (uintmax_t)ts.tv_sec, ts.tv_nsec);
2426 }
2427
2428 wl->wl_ev_cacheflush.ev_count++;
2429
2430 return error;
2431 }
2432
2433 /*
2434 * wapbl_write_commit(wl, head, tail)
2435 *
2436 * Issue a disk cache sync to wait for all pending writes to the
2437 * log to complete, and then synchronously commit the current
2438 * circular queue head and tail to the log, in the next of two
2439 * locations for commit headers on disk.
2440 *
2441 * Increment the generation number. If the generation number
2442 * rolls over to zero, then a subsequent commit would appear to
2443 * have an older generation than this one -- in that case, issue a
2444 * duplicate commit to avoid this.
2445 *
2446 * => Caller must have exclusive access to wl, either by holding
2447 * wl->wl_rwlock for writer or by being wapbl_start before anyone
2448 * else has seen wl.
2449 */
2450 static int
2451 wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
2452 {
2453 struct wapbl_wc_header *wc = wl->wl_wc_header;
2454 struct timespec ts;
2455 int error;
2456 daddr_t pbn;
2457
2458 error = wapbl_buffered_flush(wl, true);
2459 if (error)
2460 return error;
2461 /*
2462 	 * Flush the disk cache to ensure that the blocks we've written
2463 	 * are actually on stable storage before the commit header.
2464 	 * This flushes to disk not only the journal blocks, but also all
2465 	 * metadata blocks written asynchronously since the previous commit.
2466 	 *
2467 	 * XXX Ideally compute a checksum here; we do this sync for now
2468 */
2469 wapbl_cache_sync(wl, "1");
2470
2471 wc->wc_head = head;
2472 wc->wc_tail = tail;
2473 wc->wc_checksum = 0;
2474 wc->wc_version = 1;
2475 getnanotime(&ts);
2476 wc->wc_time = ts.tv_sec;
2477 wc->wc_timensec = ts.tv_nsec;
2478
2479 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2480 ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n",
2481 (intmax_t)head, (intmax_t)tail));
2482
2483 	/*
2484 	 * Write the commit header.
2485 	 *
2486 	 * XXX if the generation will roll over, first zero out the
2487 	 * second commit header before trying to write both headers.
2488 	 */
2489
2490 pbn = wl->wl_logpbn + (wc->wc_generation % 2);
2491 #ifdef _KERNEL
2492 pbn = btodb(pbn << wc->wc_log_dev_bshift);
2493 #endif
2494 error = wapbl_buffered_write(wc, wc->wc_len, wl, pbn, WAPBL_JFLAGS(wl));
2495 if (error)
2496 return error;
2497 error = wapbl_buffered_flush(wl, true);
2498 if (error)
2499 return error;
2500
2501 /*
2502 * Flush disk cache to ensure that the commit header is actually
2503 * written before meta data blocks. Commit block is written using
2504 * FUA when enabled, in that case this flush is not needed.
2505 */
2506 if (!WAPBL_USE_FUA(wl))
2507 wapbl_cache_sync(wl, "2");
2508
2509 /*
2510 * If the generation number was zero, write it out a second time.
2511 	 * This handles initialization and generation number rollover.
2512 */
2513 if (wc->wc_generation++ == 0) {
2514 error = wapbl_write_commit(wl, head, tail);
2515 /*
2516 		 * This panic could be removed if we did the zeroing
2517 		 * mentioned above and were certain to roll back the
2518 		 * generation number on failure.
2519 */
2520 if (error)
2521 panic("wapbl_write_commit: error writing duplicate "
2522 "log header: %d", error);
2523 }
2524
2525 wl->wl_ev_commit.ev_count++;
2526
2527 return 0;
2528 }
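
/*
 * The two on-disk commit slots therefore alternate with the
 * generation number: generation 0 is written at wl_logpbn + 0,
 * generation 1 at wl_logpbn + 1, generation 2 at slot 0 again, and
 * so on.  Replay believes whichever header carries the larger
 * generation (see wapbl_replay_start, which prefers wch2 when
 * wch2->wc_generation > wch->wc_generation).
 */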
2529
2530 /*
2531 * wapbl_write_blocks(wl, offp)
2532 *
2533 * Write all pending physical blocks in the current transaction
2534 * from wapbl_add_buf to the log on disk, adding to the circular
2535 * queue head at byte offset *offp, and returning the new head's
2536 * byte offset in *offp.
2537 */
2538 static int
2539 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
2540 {
2541 struct wapbl_wc_blocklist *wc =
2542 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2543 int blocklen = 1<<wl->wl_log_dev_bshift;
2544 struct buf *bp;
2545 off_t off = *offp;
2546 int error;
2547 size_t padding;
2548
2549 KASSERT(rw_write_held(&wl->wl_rwlock));
2550
2551 bp = TAILQ_FIRST(&wl->wl_bufs);
2552
2553 while (bp) {
2554 int cnt;
2555 struct buf *obp = bp;
2556
2557 KASSERT(bp->b_flags & B_LOCKED);
2558
2559 wc->wc_type = WAPBL_WC_BLOCKS;
2560 wc->wc_len = blocklen;
2561 wc->wc_blkcount = 0;
2562 while (bp && (wc->wc_blkcount < wl->wl_brperjblock)) {
2563 /*
2564 * Make sure all the physical block numbers are up to
2565 * date. If this is not always true on a given
2566 * filesystem, then VOP_BMAP must be called. We
2567 * could call VOP_BMAP here, or else in the filesystem
2568 * specific flush callback, although neither of those
2569 * solutions allow us to take the vnode lock. If a
2570 * filesystem requires that we must take the vnode lock
2571 * to call VOP_BMAP, then we can probably do it in
2572 * bwrite when the vnode lock should already be held
2573 * by the invoking code.
2574 */
2575 KASSERT((bp->b_vp->v_type == VBLK) ||
2576 (bp->b_blkno != bp->b_lblkno));
2577 KASSERT(bp->b_blkno > 0);
2578
2579 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
2580 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
2581 wc->wc_len += bp->b_bcount;
2582 wc->wc_blkcount++;
2583 bp = TAILQ_NEXT(bp, b_wapbllist);
2584 }
2585 if (wc->wc_len % blocklen != 0) {
2586 padding = blocklen - wc->wc_len % blocklen;
2587 wc->wc_len += padding;
2588 } else {
2589 padding = 0;
2590 }
2591
2592 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2593 ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
2594 wc->wc_len, padding, (intmax_t)off));
2595
2596 error = wapbl_circ_write(wl, wc, blocklen, &off);
2597 if (error)
2598 return error;
2599 bp = obp;
2600 cnt = 0;
2601 while (bp && (cnt++ < wl->wl_brperjblock)) {
2602 error = wapbl_circ_write(wl, bp->b_data,
2603 bp->b_bcount, &off);
2604 if (error)
2605 return error;
2606 bp = TAILQ_NEXT(bp, b_wapbllist);
2607 }
2608 if (padding) {
2609 void *zero;
2610
2611 zero = wapbl_alloc(padding);
2612 memset(zero, 0, padding);
2613 error = wapbl_circ_write(wl, zero, padding, &off);
2614 wapbl_free(zero, padding);
2615 if (error)
2616 return error;
2617 }
2618 }
2619 *offp = off;
2620 return 0;
2621 }
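
/*
 * Padding example: with blocklen = 2048, a record whose header plus
 * payload bring wc_len to 5120 leaves wc_len % blocklen = 1024, so
 * padding = 1024 bytes of zeros is appended and wc_len becomes 6144,
 * keeping every journal record a whole number of log-device blocks.
 */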
2622
2623 /*
2624 * wapbl_write_revocations(wl, offp)
2625 *
2626 * Write all pending deallocations in the current transaction from
2627 * wapbl_register_deallocation to the log on disk, adding to the
2628 * circular queue's head at byte offset *offp, and returning the
2629 * new head's byte offset in *offp.
2630 */
2631 static int
2632 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
2633 {
2634 struct wapbl_wc_blocklist *wc =
2635 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2636 struct wapbl_dealloc *wd, *lwd;
2637 int blocklen = 1<<wl->wl_log_dev_bshift;
2638 off_t off = *offp;
2639 int error;
2640
2641 KASSERT(rw_write_held(&wl->wl_rwlock));
2642
2643 if (wl->wl_dealloccnt == 0)
2644 return 0;
2645
2646 while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2647 wc->wc_type = WAPBL_WC_REVOCATIONS;
2648 wc->wc_len = blocklen;
2649 wc->wc_blkcount = 0;
2650 while (wd && (wc->wc_blkcount < wl->wl_brperjblock)) {
2651 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2652 wd->wd_blkno;
2653 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2654 wd->wd_len;
2655 wc->wc_blkcount++;
2656
2657 wd = TAILQ_NEXT(wd, wd_entries);
2658 }
2659 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2660 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2661 wc->wc_len, (intmax_t)off));
2662 error = wapbl_circ_write(wl, wc, blocklen, &off);
2663 if (error)
2664 return error;
2665
2666 /* free all successfully written deallocs */
2667 lwd = wd;
2668 while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2669 if (wd == lwd)
2670 break;
2671 wapbl_deallocation_free(wl, wd, true);
2672 }
2673 }
2674 *offp = off;
2675 return 0;
2676 }
2677
2678 /*
2679 * wapbl_write_inodes(wl, offp)
2680 *
2681 * Write all pending inode allocations in the current transaction
2682 * from wapbl_register_inode to the log on disk, adding to the
2683 * circular queue's head at byte offset *offp and returning the
2684 * new head's byte offset in *offp.
2685 */
2686 static int
2687 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2688 {
2689 struct wapbl_wc_inodelist *wc =
2690 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2691 int i;
2692 int blocklen = 1 << wl->wl_log_dev_bshift;
2693 off_t off = *offp;
2694 int error;
2695
2696 struct wapbl_ino_head *wih;
2697 struct wapbl_ino *wi;
2698 int iph;
2699
2700 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2701 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2702
2703 i = 0;
2704 wih = &wl->wl_inohash[0];
2705 wi = 0;
2706 do {
2707 wc->wc_type = WAPBL_WC_INODES;
2708 wc->wc_len = blocklen;
2709 wc->wc_inocnt = 0;
2710 wc->wc_clear = (i == 0);
2711 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2712 while (!wi) {
2713 KASSERT((wih - &wl->wl_inohash[0])
2714 <= wl->wl_inohashmask);
2715 wi = LIST_FIRST(wih++);
2716 }
2717 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2718 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2719 wc->wc_inocnt++;
2720 i++;
2721 wi = LIST_NEXT(wi, wi_hash);
2722 }
2723 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2724 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2725 wc->wc_len, (intmax_t)off));
2726 error = wapbl_circ_write(wl, wc, blocklen, &off);
2727 if (error)
2728 return error;
2729 } while (i < wl->wl_inohashcnt);
2730
2731 *offp = off;
2732 return 0;
2733 }
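
/*
 * Only the first chunk of a transaction's inode list is written with
 * wc_clear set (i == 0 above): on replay that resets the accumulated
 * inode list before the new entries are appended, while the
 * remaining chunks of the same transaction simply extend it (see
 * wapbl_replay_process_inodes below).
 */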
2734
2735 #endif /* _KERNEL */
2736
2737 /****************************************************************/
2738
2739 struct wapbl_blk {
2740 LIST_ENTRY(wapbl_blk) wb_hash;
2741 daddr_t wb_blk;
2742 off_t wb_off; /* Offset of this block in the log */
2743 };
2744 #define WAPBL_BLKPOOL_MIN 83
2745
2746 static void
2747 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2748 {
2749 if (size < WAPBL_BLKPOOL_MIN)
2750 size = WAPBL_BLKPOOL_MIN;
2751 KASSERT(wr->wr_blkhash == 0);
2752 #ifdef _KERNEL
2753 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2754 #else /* ! _KERNEL */
2755 /* Manually implement hashinit */
2756 {
2757 unsigned long i, hashsize;
2758 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2759 continue;
2760 wr->wr_blkhash = wapbl_alloc(hashsize * sizeof(*wr->wr_blkhash));
2761 for (i = 0; i < hashsize; i++)
2762 LIST_INIT(&wr->wr_blkhash[i]);
2763 wr->wr_blkhashmask = hashsize - 1;
2764 }
2765 #endif /* ! _KERNEL */
2766 }
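
/*
 * The userland substitute above rounds the requested size up to a
 * power of two so that wr_blkhashmask works as a bit mask: e.g. a
 * request for size = 100 yields hashsize = 128 and wr_blkhashmask =
 * 0x7f, and a block number then hashes simply as
 * (blk & wr_blkhashmask).
 */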
2767
2768 static void
2769 wapbl_blkhash_free(struct wapbl_replay *wr)
2770 {
2771 KASSERT(wr->wr_blkhashcnt == 0);
2772 #ifdef _KERNEL
2773 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2774 #else /* ! _KERNEL */
2775 wapbl_free(wr->wr_blkhash,
2776 (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
2777 #endif /* ! _KERNEL */
2778 }
2779
2780 static struct wapbl_blk *
2781 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2782 {
2783 struct wapbl_blk_head *wbh;
2784 struct wapbl_blk *wb;
2785 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2786 LIST_FOREACH(wb, wbh, wb_hash) {
2787 if (blk == wb->wb_blk)
2788 return wb;
2789 }
2790 return 0;
2791 }
2792
2793 static void
2794 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2795 {
2796 struct wapbl_blk_head *wbh;
2797 struct wapbl_blk *wb;
2798 wb = wapbl_blkhash_get(wr, blk);
2799 if (wb) {
2800 KASSERT(wb->wb_blk == blk);
2801 wb->wb_off = off;
2802 } else {
2803 wb = wapbl_alloc(sizeof(*wb));
2804 wb->wb_blk = blk;
2805 wb->wb_off = off;
2806 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2807 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2808 wr->wr_blkhashcnt++;
2809 }
2810 }
2811
2812 static void
2813 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2814 {
2815 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2816 if (wb) {
2817 KASSERT(wr->wr_blkhashcnt > 0);
2818 wr->wr_blkhashcnt--;
2819 LIST_REMOVE(wb, wb_hash);
2820 wapbl_free(wb, sizeof(*wb));
2821 }
2822 }
2823
2824 static void
2825 wapbl_blkhash_clear(struct wapbl_replay *wr)
2826 {
2827 unsigned long i;
2828 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2829 struct wapbl_blk *wb;
2830
2831 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2832 KASSERT(wr->wr_blkhashcnt > 0);
2833 wr->wr_blkhashcnt--;
2834 LIST_REMOVE(wb, wb_hash);
2835 wapbl_free(wb, sizeof(*wb));
2836 }
2837 }
2838 KASSERT(wr->wr_blkhashcnt == 0);
2839 }
2840
2841 /****************************************************************/
2842
2843 /*
2844 * wapbl_circ_read(wr, data, len, offp)
2845 *
2846 * Read len bytes into data from the circular queue of wr,
2847 * starting at the linear byte offset *offp, and returning the new
2848 * linear byte offset in *offp.
2849 *
2850 * If the starting linear byte offset precedes wr->wr_circ_off,
2851 * the read instead begins at wr->wr_circ_off. XXX WTF? This
2852 * should be a KASSERT, not a conditional.
2853 */
2854 static int
2855 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2856 {
2857 size_t slen;
2858 off_t off = *offp;
2859 int error;
2860 daddr_t pbn;
2861
2862 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2863 wr->wr_log_dev_bshift) == len);
2864
2865 if (off < wr->wr_circ_off)
2866 off = wr->wr_circ_off;
2867 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2868 if (slen < len) {
2869 pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2870 #ifdef _KERNEL
2871 pbn = btodb(pbn << wr->wr_log_dev_bshift);
2872 #endif
2873 error = wapbl_read(data, slen, wr->wr_devvp, pbn);
2874 if (error)
2875 return error;
2876 data = (uint8_t *)data + slen;
2877 len -= slen;
2878 off = wr->wr_circ_off;
2879 }
2880 pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2881 #ifdef _KERNEL
2882 pbn = btodb(pbn << wr->wr_log_dev_bshift);
2883 #endif
2884 error = wapbl_read(data, len, wr->wr_devvp, pbn);
2885 if (error)
2886 return error;
2887 off += len;
2888 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2889 off = wr->wr_circ_off;
2890 *offp = off;
2891 return 0;
2892 }
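
/*
 * Wraparound example: with wr_circ_off = 2048 and wr_circ_size =
 * 8192, a read of len = 3072 starting at *offp = 9216 has only
 * slen = 2048 + 8192 - 9216 = 1024 bytes before the end of the
 * circular area, so it is split into a 1024-byte read at offset 9216
 * followed by a 2048-byte read at offset 2048, and *offp comes back
 * as 4096.
 */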
2893
2894 /*
2895 * wapbl_circ_advance(wr, len, offp)
2896 *
2897 * Compute the linear byte offset of the circular queue of wr that
2898 * is len bytes past *offp, and store it in *offp.
2899 *
2900  * This is like wapbl_circ_read, but without actually reading
2901 * anything.
2902 *
2903 * If the starting linear byte offset precedes wr->wr_circ_off, it
2904 * is taken to be wr->wr_circ_off instead. XXX WTF? This should
2905 * be a KASSERT, not a conditional.
2906 */
2907 static void
2908 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2909 {
2910 size_t slen;
2911 off_t off = *offp;
2912
2913 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2914 wr->wr_log_dev_bshift) == len);
2915
2916 if (off < wr->wr_circ_off)
2917 off = wr->wr_circ_off;
2918 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2919 if (slen < len) {
2920 len -= slen;
2921 off = wr->wr_circ_off;
2922 }
2923 off += len;
2924 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2925 off = wr->wr_circ_off;
2926 *offp = off;
2927 }
2928
2929 /****************************************************************/
2930
2931 int
2932 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2933 daddr_t off, size_t count, size_t blksize)
2934 {
2935 struct wapbl_replay *wr;
2936 int error;
2937 struct vnode *devvp;
2938 daddr_t logpbn;
2939 uint8_t *scratch;
2940 struct wapbl_wc_header *wch;
2941 struct wapbl_wc_header *wch2;
2942 /* Use this until we read the actual log header */
2943 int log_dev_bshift = ilog2(blksize);
2944 size_t used;
2945 daddr_t pbn;
2946
2947 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2948 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2949 vp, off, count, blksize));
2950
2951 if (off < 0)
2952 return EINVAL;
2953
2954 if (blksize < DEV_BSIZE)
2955 return EINVAL;
2956 if (blksize % DEV_BSIZE)
2957 return EINVAL;
2958
2959 #ifdef _KERNEL
2960 #if 0
2961 /* XXX vp->v_size isn't reliably set for VBLK devices,
2962 * especially root. However, we might still want to verify
2963 * that the full load is readable */
2964 if ((off + count) * blksize > vp->v_size)
2965 return EINVAL;
2966 #endif
2967 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2968 return error;
2969 }
2970 #else /* ! _KERNEL */
2971 devvp = vp;
2972 logpbn = off;
2973 #endif /* ! _KERNEL */
2974
2975 scratch = wapbl_alloc(MAXBSIZE);
2976
2977 pbn = logpbn;
2978 #ifdef _KERNEL
2979 pbn = btodb(pbn << log_dev_bshift);
2980 #endif
2981 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, pbn);
2982 if (error)
2983 goto errout;
2984
2985 wch = (struct wapbl_wc_header *)scratch;
2986 wch2 =
2987 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2988 /* XXX verify checksums and magic numbers */
2989 if (wch->wc_type != WAPBL_WC_HEADER) {
2990 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2991 error = EFTYPE;
2992 goto errout;
2993 }
2994
2995 if (wch2->wc_generation > wch->wc_generation)
2996 wch = wch2;
2997
2998 wr = wapbl_calloc(1, sizeof(*wr));
2999
3000 wr->wr_logvp = vp;
3001 wr->wr_devvp = devvp;
3002 wr->wr_logpbn = logpbn;
3003
3004 wr->wr_scratch = scratch;
3005
3006 wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
3007 wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
3008 wr->wr_circ_off = wch->wc_circ_off;
3009 wr->wr_circ_size = wch->wc_circ_size;
3010 wr->wr_generation = wch->wc_generation;
3011
3012 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
3013
3014 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
3015 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
3016 " len=%"PRId64" used=%zu\n",
3017 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
3018 wch->wc_circ_size, used));
3019
3020 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
3021
3022 error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
3023 if (error) {
3024 wapbl_replay_stop(wr);
3025 wapbl_replay_free(wr);
3026 return error;
3027 }
3028
3029 *wrp = wr;
3030 return 0;
3031
3032 errout:
3033 wapbl_free(scratch, MAXBSIZE);
3034 return error;
3035 }
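
/*
 * A typical mount-time replay sequence, sketched (the exact calls
 * and error handling live in the file system code):
 *
 *	struct wapbl_replay *wr;
 *	error = wapbl_replay_start(&wr, logvp, off, count, blksize);
 *	if (error)
 *		return error;
 *	error = wapbl_replay_write(wr, fsdevvp);  // write blocks home
 *	wapbl_replay_stop(wr);			  // release scratch
 *	wapbl_replay_free(wr);
 */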
3036
3037 void
3038 wapbl_replay_stop(struct wapbl_replay *wr)
3039 {
3040
3041 if (!wapbl_replay_isopen(wr))
3042 return;
3043
3044 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
3045
3046 wapbl_free(wr->wr_scratch, MAXBSIZE);
3047 wr->wr_scratch = NULL;
3048
3049 wr->wr_logvp = NULL;
3050
3051 wapbl_blkhash_clear(wr);
3052 wapbl_blkhash_free(wr);
3053 }
3054
3055 void
3056 wapbl_replay_free(struct wapbl_replay *wr)
3057 {
3058
3059 KDASSERT(!wapbl_replay_isopen(wr));
3060
3061 if (wr->wr_inodes)
3062 wapbl_free(wr->wr_inodes,
3063 wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
3064 wapbl_free(wr, sizeof(*wr));
3065 }
3066
3067 #ifdef _KERNEL
3068 int
3069 wapbl_replay_isopen1(struct wapbl_replay *wr)
3070 {
3071
3072 return wapbl_replay_isopen(wr);
3073 }
3074 #endif
3075
3076 /*
3077  * Calculate the disk address for the i'th block in the wc_blocks
3078  * list, offset by j blocks of size blen.
3079  *
3080  * wc_daddr is always a kernel disk address in DEV_BSIZE units that
3081  * was written to the journal.
3082  *
3083  * The kernel needs that address plus the offset in DEV_BSIZE units.
3084  *
3085  * Userland needs that address plus the offset in blen units.
3086  */
3088 static daddr_t
3089 wapbl_block_daddr(struct wapbl_wc_blocklist *wc, int i, int j, int blen)
3090 {
3091 daddr_t pbn;
3092
3093 #ifdef _KERNEL
3094 pbn = wc->wc_blocks[i].wc_daddr + btodb(j * blen);
3095 #else
3096 pbn = dbtob(wc->wc_blocks[i].wc_daddr) / blen + j;
3097 #endif
3098
3099 return pbn;
3100 }
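
/*
 * Worked example: with blen = 2048 and DEV_BSIZE = 512, the block
 * j = 3 entries into record i lies 3 * 2048 = 6144 bytes past
 * wc_daddr.  In the kernel that is wc_daddr + btodb(6144) =
 * wc_daddr + 12 in DEV_BSIZE units; in userland it is
 * dbtob(wc_daddr) / 2048 + 3 in blen-sized units.
 */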
3101
3102 static void
3103 wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
3104 {
3105 struct wapbl_wc_blocklist *wc =
3106 (struct wapbl_wc_blocklist *)wr->wr_scratch;
3107 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3108 int i, j, n;
3109
3110 for (i = 0; i < wc->wc_blkcount; i++) {
3111 /*
3112 * Enter each physical block into the hashtable independently.
3113 */
3114 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
3115 for (j = 0; j < n; j++) {
3116 wapbl_blkhash_ins(wr, wapbl_block_daddr(wc, i, j, fsblklen),
3117 *offp);
3118 wapbl_circ_advance(wr, fsblklen, offp);
3119 }
3120 }
3121 }
3122
3123 static void
3124 wapbl_replay_process_revocations(struct wapbl_replay *wr)
3125 {
3126 struct wapbl_wc_blocklist *wc =
3127 (struct wapbl_wc_blocklist *)wr->wr_scratch;
3128 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3129 int i, j, n;
3130
3131 for (i = 0; i < wc->wc_blkcount; i++) {
3132 /*
3133 * Remove any blocks found from the hashtable.
3134 */
3135 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
3136 for (j = 0; j < n; j++)
3137 wapbl_blkhash_rem(wr, wapbl_block_daddr(wc, i, j, fsblklen));
3138 }
3139 }
3140
3141 static void
3142 wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
3143 {
3144 struct wapbl_wc_inodelist *wc =
3145 (struct wapbl_wc_inodelist *)wr->wr_scratch;
3146 void *new_inodes;
3147 const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
3148
3149 KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
3150
3151 /*
3152 	 * Keep track of where we found this so the location won't be
3153 * overwritten.
3154 */
3155 if (wc->wc_clear) {
3156 wr->wr_inodestail = oldoff;
3157 wr->wr_inodescnt = 0;
3158 if (wr->wr_inodes != NULL) {
3159 wapbl_free(wr->wr_inodes, oldsize);
3160 wr->wr_inodes = NULL;
3161 }
3162 }
3163 wr->wr_inodeshead = newoff;
3164 if (wc->wc_inocnt == 0)
3165 return;
3166
3167 new_inodes = wapbl_alloc((wr->wr_inodescnt + wc->wc_inocnt) *
3168 sizeof(wr->wr_inodes[0]));
3169 if (wr->wr_inodes != NULL) {
3170 memcpy(new_inodes, wr->wr_inodes, oldsize);
3171 wapbl_free(wr->wr_inodes, oldsize);
3172 }
3173 wr->wr_inodes = new_inodes;
3174 memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
3175 wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
3176 wr->wr_inodescnt += wc->wc_inocnt;
3177 }
3178
3179 static int
3180 wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
3181 {
3182 off_t off;
3183 int error;
3184
3185 int logblklen = 1 << wr->wr_log_dev_bshift;
3186
3187 wapbl_blkhash_clear(wr);
3188
3189 off = tail;
3190 while (off != head) {
3191 struct wapbl_wc_null *wcn;
3192 off_t saveoff = off;
3193 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3194 if (error)
3195 goto errout;
3196 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3197 switch (wcn->wc_type) {
3198 case WAPBL_WC_BLOCKS:
3199 wapbl_replay_process_blocks(wr, &off);
3200 break;
3201
3202 case WAPBL_WC_REVOCATIONS:
3203 wapbl_replay_process_revocations(wr);
3204 break;
3205
3206 case WAPBL_WC_INODES:
3207 wapbl_replay_process_inodes(wr, saveoff, off);
3208 break;
3209
3210 default:
3211 printf("Unrecognized wapbl type: 0x%08x\n",
3212 wcn->wc_type);
3213 error = EFTYPE;
3214 goto errout;
3215 }
3216 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3217 if (off != saveoff) {
3218 printf("wapbl_replay: corrupted records\n");
3219 error = EFTYPE;
3220 goto errout;
3221 }
3222 }
3223 return 0;
3224
3225 errout:
3226 wapbl_blkhash_clear(wr);
3227 return error;
3228 }
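
/*
 * The saveoff check above is the log's self-consistency test: after
 * each record, advancing saveoff by the header's wc_len must land
 * exactly where the per-record processing left the offset.  A
 * mismatch means a record's declared length disagrees with its
 * payload, so replay is abandoned with EFTYPE rather than trusting a
 * corrupted journal.
 */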
3229
3230 #if 0
3231 int
3232 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
3233 {
3234 off_t off;
3235 int mismatchcnt = 0;
3236 int logblklen = 1 << wr->wr_log_dev_bshift;
3237 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3238 void *scratch1 = wapbl_alloc(MAXBSIZE);
3239 void *scratch2 = wapbl_alloc(MAXBSIZE);
3240 int error = 0;
3241
3242 KDASSERT(wapbl_replay_isopen(wr));
3243
3244 off = wch->wc_tail;
3245 while (off != wch->wc_head) {
3246 struct wapbl_wc_null *wcn;
3247 #ifdef DEBUG
3248 off_t saveoff = off;
3249 #endif
3250 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3251 if (error)
3252 goto out;
3253 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3254 switch (wcn->wc_type) {
3255 case WAPBL_WC_BLOCKS:
3256 {
3257 struct wapbl_wc_blocklist *wc =
3258 (struct wapbl_wc_blocklist *)wr->wr_scratch;
3259 int i;
3260 for (i = 0; i < wc->wc_blkcount; i++) {
3261 int foundcnt = 0;
3262 int dirtycnt = 0;
3263 int j, n;
3264 				/*
3265 				 * Check each physical block against the
3266 				 * hashtable independently.
3267 				 */
3268 n = wc->wc_blocks[i].wc_dlen >>
3269 wch->wc_fs_dev_bshift;
3270 for (j = 0; j < n; j++) {
3271 struct wapbl_blk *wb =
3272 wapbl_blkhash_get(wr,
3273 wapbl_block_daddr(wc, i, j, fsblklen));
3274 if (wb && (wb->wb_off == off)) {
3275 foundcnt++;
3276 error =
3277 wapbl_circ_read(wr,
3278 scratch1, fsblklen,
3279 &off);
3280 if (error)
3281 goto out;
3282 error =
3283 wapbl_read(scratch2,
3284 fsblklen, fsdevvp,
3285 wb->wb_blk);
3286 if (error)
3287 goto out;
3288 if (memcmp(scratch1,
3289 scratch2,
3290 fsblklen)) {
3291 printf(
3292 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
3293 wb->wb_blk, (intmax_t)off);
3294 dirtycnt++;
3295 mismatchcnt++;
3296 }
3297 } else {
3298 wapbl_circ_advance(wr,
3299 fsblklen, &off);
3300 }
3301 }
3302 #if 0
3303 /*
3304 * If all of the blocks in an entry
3305 * are clean, then remove all of its
3306 * blocks from the hashtable since they
3307 * never will need replay.
3308 */
3309 if ((foundcnt != 0) &&
3310 (dirtycnt == 0)) {
3311 off = saveoff;
3312 wapbl_circ_advance(wr,
3313 logblklen, &off);
3314 for (j = 0; j < n; j++) {
3315 struct wapbl_blk *wb =
3316 wapbl_blkhash_get(wr,
3317 wapbl_block_daddr(wc, i, j, fsblklen));
3318 if (wb &&
3319 (wb->wb_off == off)) {
3320 wapbl_blkhash_rem(wr, wb->wb_blk);
3321 }
3322 wapbl_circ_advance(wr,
3323 fsblklen, &off);
3324 }
3325 }
3326 #endif
3327 }
3328 }
3329 break;
3330 case WAPBL_WC_REVOCATIONS:
3331 case WAPBL_WC_INODES:
3332 break;
3333 default:
3334 KASSERT(0);
3335 }
3336 #ifdef DEBUG
3337 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3338 KASSERT(off == saveoff);
3339 #endif
3340 }
3341 out:
3342 wapbl_free(scratch1, MAXBSIZE);
3343 wapbl_free(scratch2, MAXBSIZE);
3344 if (!error && mismatchcnt)
3345 error = EFTYPE;
3346 return error;
3347 }
3348 #endif
3349
3350 int
3351 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
3352 {
3353 struct wapbl_blk *wb;
3354 size_t i;
3355 off_t off;
3356 void *scratch;
3357 int error = 0;
3358 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3359
3360 KDASSERT(wapbl_replay_isopen(wr));
3361
3362 scratch = wapbl_alloc(MAXBSIZE);
3363
3364 for (i = 0; i <= wr->wr_blkhashmask; ++i) {
3365 LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
3366 off = wb->wb_off;
3367 error = wapbl_circ_read(wr, scratch, fsblklen, &off);
3368 if (error)
3369 break;
3370 error = wapbl_write(scratch, fsblklen, fsdevvp,
3371 wb->wb_blk);
3372 if (error)
3373 break;
3374 }
3375 }
3376
3377 wapbl_free(scratch, MAXBSIZE);
3378 return error;
3379 }
3380
3381 int
3382 wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
3383 {
3384 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3385
3386 KDASSERT(wapbl_replay_isopen(wr));
3387 KASSERT((len % fsblklen) == 0);
3388
3389 while (len != 0) {
3390 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3391 if (wb)
3392 return 1;
3393 		len -= fsblklen;
		blk++;	/* advance to the next block, as wapbl_replay_read does */
3394 	}
3395 return 0;
3396 }
3397
3398 int
3399 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
3400 {
3401 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3402
3403 KDASSERT(wapbl_replay_isopen(wr));
3404
3405 KASSERT((len % fsblklen) == 0);
3406
3407 while (len != 0) {
3408 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3409 if (wb) {
3410 off_t off = wb->wb_off;
3411 int error;
3412 error = wapbl_circ_read(wr, data, fsblklen, &off);
3413 if (error)
3414 return error;
3415 }
3416 data = (uint8_t *)data + fsblklen;
3417 len -= fsblklen;
3418 blk++;
3419 }
3420 return 0;
3421 }
3422
3423 #ifdef _KERNEL
3424
3425 MODULE(MODULE_CLASS_VFS, wapbl, NULL);
3426
3427 static int
3428 wapbl_modcmd(modcmd_t cmd, void *arg)
3429 {
3430
3431 switch (cmd) {
3432 case MODULE_CMD_INIT:
3433 wapbl_init();
3434 return 0;
3435 case MODULE_CMD_FINI:
3436 return wapbl_fini();
3437 default:
3438 return ENOTTY;
3439 }
3440 }
3441 #endif /* _KERNEL */
3442