/*	$NetBSD: vfs_wapbl.c,v 1.52.2.5 2017/12/03 11:38:45 jdolecek Exp $	*/

/*-
 * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements file system independent write-ahead logging.
 */

#define WAPBL_INTERNAL

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.52.2.5 2017/12/03 11:38:45 jdolecek Exp $");

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/time.h>
#include <sys/wapbl.h>
#include <sys/wapbl_replay.h>

#ifdef _KERNEL

#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/evcnt.h>
#include <sys/file.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <miscfs/specfs/specdev.h>

#define	wapbl_alloc(s)		kmem_alloc((s), KM_SLEEP)
#define	wapbl_free(a, s)	kmem_free((a), (s))
#define	wapbl_calloc(n, s)	kmem_zalloc((n)*(s), KM_SLEEP)

static struct sysctllog *wapbl_sysctl;
static int wapbl_flush_disk_cache = 1;
static int wapbl_verbose_commit = 0;
static int wapbl_allow_dpofua = 0;	/* switched off by default for now */
static int wapbl_journal_iobufs = 4;

static inline size_t wapbl_space_free(size_t, off_t, off_t);

#else /* !_KERNEL */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	KDASSERT(x)		assert(x)
#define	KASSERT(x)		assert(x)
#define	wapbl_alloc(s)		malloc(s)
#define	wapbl_free(a, s)	free(a)
#define	wapbl_calloc(n, s)	calloc((n), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		lm = rwlock held writing or mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
LIST_HEAD(wapbl_ino_head, wapbl_ino);
struct wapbl {
	struct vnode *wl_logvp;	/* r:	log here */
	struct vnode *wl_devvp;	/* r:	log on this device */
	struct mount *wl_mount;	/* r:	mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r:	Physical block number of start of log */
	int wl_log_dev_bshift;	/* r:	logarithm of device block size of log
					   device */
	int wl_fs_dev_bshift;	/* r:	logarithm of device block size of
					   filesystem device */

	unsigned wl_lock_count;	/* m:	Count of transactions in progress */

	size_t wl_circ_size;	/* r:	Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r:	Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r:	Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r:	Number of buf bytes reserved for log */

	off_t wl_head;		/* l:	Byte offset of log head */
	off_t wl_tail;		/* l:	Byte offset of log tail */
	/*
	 * WAPBL log layout, stored on wl_devvp at wl_logpbn:
	 *
	 *  ___________________ wl_circ_size __________________
	 * /                                                   \
	 * +---------+---------+-------+--------------+--------+
	 * [ commit0 | commit1 | CCWCW | EEEEEEEEEEEE | CCCWCW ]
	 * +---------+---------+-------+--------------+--------+
	 *       wl_circ_off --^       ^-- wl_head    ^-- wl_tail
	 *
	 * commit0 and commit1 are commit headers.  A commit header has
	 * a generation number, indicating which of the two headers is
	 * more recent, and an assignment of head and tail pointers.
	 * The rest is a circular queue of log records, starting at
	 * the byte offset wl_circ_off.
	 *
	 * E marks empty space for records.
	 * W marks records for block writes issued but waiting.
	 * C marks completed records.
	 *
	 * wapbl_flush writes new records to empty `E' spaces after
	 * wl_head from the current transaction in memory.
	 *
	 * wapbl_truncate advances wl_tail past any completed `C'
	 * records, freeing them up for use.
	 *
	 * head == tail == 0 means log is empty.
	 * head == tail != 0 means log is full.
	 *
	 * See assertions in wapbl_advance() for other boundary
	 * conditions.
	 *
	 * Only wapbl_flush moves the head, except when wapbl_truncate
	 * sets it to 0 to indicate that the log is empty.
	 *
	 * Only wapbl_truncate moves the tail, except when wapbl_flush
	 * sets it to wl_circ_off to indicate that the log is full.
	 */
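
	/*
	 * Worked example of the head/tail conventions above
	 * (illustrative numbers only, not from this file): with
	 * wl_circ_off = 1024 and wl_circ_size = 8192, an empty log has
	 * head = tail = 0.  After the first flush appends 4096 bytes
	 * of records, head = 5120 and tail = 1024; once those block
	 * writes complete, truncate may advance the tail toward the
	 * head, and when it catches up the log is empty again and
	 * both offsets are reset to 0.
	 */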

	struct wapbl_wc_header *wl_wc_header;	/* l	*/
	void *wl_wc_scratch;	/* l:	scratch space (XXX: why?!?) */

	kmutex_t wl_mtx;	/* u:	short-term lock */
	krwlock_t wl_rwlock;	/* u:	File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

#if _KERNEL
	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
	wapbl_flush_fn_t wl_flush;	/* r	*/
	wapbl_flush_fn_t wl_flush_abort;/* r	*/

	/* Event counters */
	char wl_ev_group[EVCNT_STRING_MAX];	/* r	*/
	struct evcnt wl_ev_commit;		/* l	*/
	struct evcnt wl_ev_journalwrite;	/* l	*/
	struct evcnt wl_ev_jbufs_bio_nowait;	/* l	*/
	struct evcnt wl_ev_metawrite;		/* lm	*/
	struct evcnt wl_ev_cacheflush;		/* l	*/
#endif

	size_t wl_bufbytes;	/* m:	Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m:	Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m:	Total bcount of wl_bufs */

	TAILQ_HEAD(, buf) wl_bufs;	/* m:	Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes;	/* m:	Amount of space available for
						   reclamation by truncate */
	int wl_error_count;	/* m:	# of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes;	/* Byte count of unsynced buffers */
#endif

#if _KERNEL
	int wl_brperjblock;	/* r	Block records per journal block */
#endif

	TAILQ_HEAD(, wapbl_dealloc) wl_dealloclist;	/* lm:	list head */
	int wl_dealloccnt;	/* lm:	total count */
	int wl_dealloclim;	/* r:	max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	struct wapbl_ino_head *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries;	/* On disk transaction
						   accounting */

	/* buffers for wapbl_buffered_write() */
	TAILQ_HEAD(, buf) wl_iobufs;		/* l: Free or filling bufs */
	TAILQ_HEAD(, buf) wl_iobufs_busy;	/* l: In-transit bufs */

	int wl_dkcache;		/* r:	disk cache flags */
#define WAPBL_USE_FUA(wl)	\
		(wapbl_allow_dpofua && ISSET((wl)->wl_dkcache, DKCACHE_FUA))
#define WAPBL_JFLAGS(wl)	\
		(WAPBL_USE_FUA(wl) ? (wl)->wl_jwrite_flags : 0)
#define WAPBL_JDATA_FLAGS(wl)	\
		(WAPBL_JFLAGS(wl) & B_MEDIA_DPO)	/* only DPO */
	int wl_jwrite_flags;	/* r:	journal write flags */
};

#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
#endif /* _KERNEL */

static int wapbl_replay_process(struct wapbl_replay *wr, off_t, off_t);

static inline size_t wapbl_space_used(size_t avail, off_t head,
	off_t tail);

#ifdef _KERNEL

static struct pool wapbl_entry_pool;
static struct pool wapbl_dealloc_pool;

#define	WAPBL_INODETRK_SIZE 83
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

static void wapbl_deallocation_free(struct wapbl *, struct wapbl_dealloc *,
	bool);

static void wapbl_evcnt_init(struct wapbl *);
static void wapbl_evcnt_free(struct wapbl *);

static void wapbl_dkcache_init(struct wapbl *);

#if 0
int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
#endif

static int wapbl_replay_isopen1(struct wapbl_replay *);

struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_can_read = wapbl_replay_can_read,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};

static int
wapbl_sysctl_init(void)
{
	int rv;
	const struct sysctlnode *rnode, *cnode;

	wapbl_sysctl = NULL;

	rv = sysctl_createv(&wapbl_sysctl, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "wapbl",
	    SYSCTL_DESCR("WAPBL journaling options"),
	    NULL, 0, NULL, 0,
	    CTL_VFS, CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "flush_disk_cache",
	    SYSCTL_DESCR("flush disk cache"),
	    NULL, 0, &wapbl_flush_disk_cache, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "verbose_commit",
	    SYSCTL_DESCR("show time and size of wapbl log commits"),
	    NULL, 0, &wapbl_verbose_commit, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "allow_dpofua",
	    SYSCTL_DESCR("allow use of FUA/DPO instead of cache flush if available"),
	    NULL, 0, &wapbl_allow_dpofua, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "journal_iobufs",
	    SYSCTL_DESCR("count of bufs used for journal I/O (max async count)"),
	    NULL, 0, &wapbl_journal_iobufs, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return rv;

	return rv;
}

static void
wapbl_init(void)
{

	pool_init(&wapbl_entry_pool, sizeof(struct wapbl_entry), 0, 0, 0,
	    "wapblentrypl", &pool_allocator_kmem, IPL_VM);
	pool_init(&wapbl_dealloc_pool, sizeof(struct wapbl_dealloc), 0, 0, 0,
	    "wapbldealloc", &pool_allocator_nointr, IPL_NONE);

	wapbl_sysctl_init();
}

static int
wapbl_fini(void)
{

	if (wapbl_sysctl != NULL)
		sysctl_teardown(&wapbl_sysctl);

	pool_destroy(&wapbl_dealloc_pool);
	pool_destroy(&wapbl_entry_pool);

	return 0;
}

static void
wapbl_evcnt_init(struct wapbl *wl)
{
	snprintf(wl->wl_ev_group, sizeof(wl->wl_ev_group),
	    "wapbl fsid 0x%x/0x%x",
	    wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[0],
	    wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[1]
	);

	evcnt_attach_dynamic(&wl->wl_ev_commit, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "commit");
	evcnt_attach_dynamic(&wl->wl_ev_journalwrite, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "journal write total");
	evcnt_attach_dynamic(&wl->wl_ev_jbufs_bio_nowait, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "journal write finished async");
	evcnt_attach_dynamic(&wl->wl_ev_metawrite, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "metadata async write");
	evcnt_attach_dynamic(&wl->wl_ev_cacheflush, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "cache flush");
}

static void
wapbl_evcnt_free(struct wapbl *wl)
{
	evcnt_detach(&wl->wl_ev_commit);
	evcnt_detach(&wl->wl_ev_journalwrite);
	evcnt_detach(&wl->wl_ev_jbufs_bio_nowait);
	evcnt_detach(&wl->wl_ev_metawrite);
	evcnt_detach(&wl->wl_ev_cacheflush);
}

static void
wapbl_dkcache_init(struct wapbl *wl)
{
	int error;

	/* Get disk cache flags */
	error = VOP_IOCTL(wl->wl_devvp, DIOCGCACHE, &wl->wl_dkcache,
	    FWRITE, FSCRED);
	if (error) {
		/* behave as if there was a write cache */
		wl->wl_dkcache = DKCACHE_WRITE;
	}

	/* Use FUA instead of cache flush if available */
	if (ISSET(wl->wl_dkcache, DKCACHE_FUA))
		wl->wl_jwrite_flags |= B_MEDIA_FUA;

	/* Use DPO for journal writes if available */
	if (ISSET(wl->wl_dkcache, DKCACHE_DPO))
		wl->wl_jwrite_flags |= B_MEDIA_DPO;
}

static int
wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
{
	int error, i;

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));

	/*
	 * It's only valid to reuse the replay log if it's
	 * the same as the new log we just opened.
	 */
	KDASSERT(!wapbl_replay_isopen(wr));
	KASSERT(wl->wl_devvp->v_type == VBLK);
	KASSERT(wr->wr_devvp->v_type == VBLK);
	KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
	KASSERT(wl->wl_logpbn == wr->wr_logpbn);
	KASSERT(wl->wl_circ_size == wr->wr_circ_size);
	KASSERT(wl->wl_circ_off == wr->wr_circ_off);
	KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
	KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);

	wl->wl_wc_header->wc_generation = wr->wr_generation + 1;

	for (i = 0; i < wr->wr_inodescnt; i++)
		wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
		    wr->wr_inodes[i].wr_imode);

	/* Make sure new transaction won't overwrite old inodes list */
	KDASSERT(wapbl_transaction_len(wl) <=
	    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
	    wr->wr_inodestail));

	wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
	wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
	    wapbl_transaction_len(wl);

	error = wapbl_write_inodes(wl, &wl->wl_head);
	if (error)
		return error;

	KASSERT(wl->wl_head != wl->wl_tail);
	KASSERT(wl->wl_head != 0);

	return 0;
}

int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = ilog2(blksize);
	int fs_dev_bshift = log_dev_bshift;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
		    ("wapbl: log device's block size cannot be larger "
		     "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < mp->mnt_maxphys) {
		return ENOSPC;
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	TAILQ_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of log_dev_bshift */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;

	/*
	 * wl_bufbytes_max limits the size of the in-memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
	 *   it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
	 * Therefore it must be a multiple of the least common multiple of
	 * those three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
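
	/*
	 * Worked example of the rounding above (illustrative numbers
	 * only, not from this file): with PAGE_SHIFT = 12 (4 KiB
	 * pages) and wl_log_dev_bshift = wl_fs_dev_bshift = 9
	 * (512-byte blocks), a starting value of 1000000 becomes
	 *	(1000000 >> 12) << 12 = 999424,
	 * which is already a multiple of 512, so the remaining two
	 * shift pairs leave it unchanged.  Each >>/<< pair rounds down
	 * to the next multiple of that power of two, and rounding down
	 * to the largest of the three alignments (here 4096) satisfies
	 * all of them at once.
	 */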

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (buf_nbuf() / 2) * 1024;

	wl->wl_brperjblock = ((1<<wl->wl_log_dev_bshift)
	    - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
	KASSERT(wl->wl_brperjblock > 0);

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = wl->wl_bufbytes_max / mp->mnt_stat.f_bsize / 2;
	TAILQ_INIT(&wl->wl_dealloclist);

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	wapbl_evcnt_init(wl);

	wapbl_dkcache_init(wl);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1 << wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_alloc(len);
	}

	TAILQ_INIT(&wl->wl_iobufs);
	TAILQ_INIT(&wl->wl_iobufs_busy);
	for (int i = 0; i < wapbl_journal_iobufs; i++) {
		struct buf *bp;

		if ((bp = geteblk(MAXPHYS)) == NULL)
			goto errout;

		mutex_enter(&bufcache_lock);
		mutex_enter(devvp->v_interlock);
		bgetvp(devvp, bp);
		mutex_exit(devvp->v_interlock);
		mutex_exit(&bufcache_lock);

		bp->b_dev = devvp->v_rdev;

		TAILQ_INSERT_TAIL(&wl->wl_iobufs, bp, b_wapbllist);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		error = wapbl_start_flush_inodes(wl, wr);
		if (error)
			goto errout;
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	while (!TAILQ_EMPTY(&wl->wl_iobufs)) {
		struct buf *bp;

		bp = TAILQ_FIRST(&wl->wl_iobufs);
		TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
		brelse(bp, BC_INVAL);
	}
	wapbl_inodetrk_free(wl);
	wapbl_free(wl, sizeof(*wl));

	return error;
}

/*
 * Like wapbl_flush, only discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct wapbl_dealloc *wd;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));

#ifdef WAPBL_DEBUG_PRINT
	{
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = TAILQ_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			pool_put(&wapbl_entry_pool, we);
		}
	}

	/* Discard list of deallocs */
	while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL)
		wapbl_deallocation_free(wl, wd, true);

	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
	KASSERT(wl->wl_dealloccnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return EBUSY;
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_iobufs_busy));

	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	while (!TAILQ_EMPTY(&wl->wl_iobufs)) {
		struct buf *bp;

		bp = TAILQ_FIRST(&wl->wl_iobufs);
		TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
		brelse(bp, BC_INVAL);
	}
	wapbl_inodetrk_free(wl);

	wapbl_evcnt_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl, sizeof(*wl));

	return 0;
}

/****************************************************************/
/*
 * Unbuffered disk I/O
 */

static void
wapbl_doio_accounting(struct vnode *devvp, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

}

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct buf *bp;
	int error;

	KASSERT(devvp->v_type == VBLK);

	wapbl_doio_accounting(devvp, flags);

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY;	/* mandatory, asserted by biowait() */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;
	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%"PRIx64"\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%"PRIx64" failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

/*
 * wapbl_write(data, len, devvp, pbn)
 *
 *	Synchronously write len bytes from data to physical block pbn
 *	on devvp.
 */
int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

/*
 * wapbl_read(data, len, devvp, pbn)
 *
 *	Synchronously read len bytes into data from physical block pbn
 *	on devvp.
 */
int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}

/****************************************************************/
/*
 * Buffered disk writes -- try to coalesce writes and emit
 * MAXPHYS-aligned blocks.
 */

/*
 * wapbl_buffered_write_async(wl, bp)
 *
 *	Send buffer for asynchronous write.
 */
static void
wapbl_buffered_write_async(struct wapbl *wl, struct buf *bp)
{
	wapbl_doio_accounting(wl->wl_devvp, bp->b_flags);

	KASSERT(TAILQ_FIRST(&wl->wl_iobufs) == bp);
	TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);

	bp->b_flags |= B_WRITE;
	bp->b_cflags = BC_BUSY;	/* mandatory, asserted by biowait() */
	bp->b_oflags = 0;
	bp->b_bcount = bp->b_resid;
	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);

	VOP_STRATEGY(wl->wl_devvp, bp);

	wl->wl_ev_journalwrite.ev_count++;

	TAILQ_INSERT_TAIL(&wl->wl_iobufs_busy, bp, b_wapbllist);
}

/*
 * wapbl_buffered_flush(wl)
 *
 *	Flush any buffered writes from wapbl_buffered_write.
 */
static int
wapbl_buffered_flush(struct wapbl *wl, bool full)
{
	int error = 0;
	struct buf *bp, *bnext;
	bool only_done = true, found = false;

	/* if there is outstanding buffered write, send it now */
	if ((bp = TAILQ_FIRST(&wl->wl_iobufs)) && bp->b_resid > 0)
		wapbl_buffered_write_async(wl, bp);

	/* wait for I/O to complete */
again:
	TAILQ_FOREACH_SAFE(bp, &wl->wl_iobufs_busy, b_wapbllist, bnext) {
		if (!full && only_done) {
			/* skip unfinished */
			if (!ISSET(bp->b_oflags, BO_DONE))
				continue;
		}

		if (ISSET(bp->b_oflags, BO_DONE))
			wl->wl_ev_jbufs_bio_nowait.ev_count++;

		TAILQ_REMOVE(&wl->wl_iobufs_busy, bp, b_wapbllist);
		error = biowait(bp);

		/* reset for reuse */
		bp->b_blkno = bp->b_resid = bp->b_flags = 0;
		TAILQ_INSERT_TAIL(&wl->wl_iobufs, bp, b_wapbllist);
		found = true;

		if (!full)
			break;
	}

	if (!found && only_done && !TAILQ_EMPTY(&wl->wl_iobufs_busy)) {
		only_done = false;
		goto again;
	}

	return error;
}

/*
 * wapbl_buffered_write(data, len, wl, pbn)
 *
 *	Write len bytes from data to physical block pbn on
 *	wl->wl_devvp.  The write may not complete until
 *	wapbl_buffered_flush.
 */
static int
wapbl_buffered_write(void *data, size_t len, struct wapbl *wl, daddr_t pbn,
    int bflags)
{
	size_t resid;
	struct buf *bp;

again:
	bp = TAILQ_FIRST(&wl->wl_iobufs);

	if (bp == NULL) {
		/* No more buffers, wait for any previous I/O to finish. */
		wapbl_buffered_flush(wl, false);

		bp = TAILQ_FIRST(&wl->wl_iobufs);
		KASSERT(bp != NULL);
	}

	/*
	 * If not adjacent to the buffered data, flush first.  The disk
	 * block address is always valid for a non-empty buffer.
	 */
	if ((bp->b_resid > 0 && pbn != bp->b_blkno + btodb(bp->b_resid))) {
		wapbl_buffered_write_async(wl, bp);
		goto again;
	}

	/*
	 * If this write goes to an empty buffer we have to
	 * save the disk block address first.
	 */
	if (bp->b_blkno == 0) {
		bp->b_blkno = pbn;
		bp->b_flags |= bflags;
	}

	/*
	 * Remaining space so this buffer ends on a buffer size boundary.
	 *
	 * This cannot become zero or negative, since the buffer would
	 * have been flushed by the previous call in that case.
	 */
	resid = bp->b_bufsize - dbtob(bp->b_blkno % btodb(bp->b_bufsize)) -
	    bp->b_resid;
	KASSERT(resid > 0);
	KASSERT(dbtob(btodb(resid)) == resid);

	if (len < resid)
		resid = len;

	memcpy((uint8_t *)bp->b_data + bp->b_resid, data, resid);
	bp->b_resid += resid;

	if (len >= resid) {
		/* Just filled the buf, or data did not fit */
		wapbl_buffered_write_async(wl, bp);

		data = (uint8_t *)data + resid;
		len -= resid;
		pbn += btodb(resid);

		if (len > 0)
			goto again;
	}

	return 0;
}

/*
 * wapbl_circ_write(wl, data, len, offp)
 *
 *	Write len bytes from data to the circular queue of wl, starting
 *	at linear byte offset *offp, and returning the new linear byte
 *	offset in *offp.
 *
 *	If the starting linear byte offset precedes wl->wl_circ_off,
 *	the write instead begins at wl->wl_circ_off.  XXX WTF?  This
 *	should be a KASSERT, not a conditional.
 *
 *	The write is buffered in wl and must be flushed with
 *	wapbl_buffered_flush before it will be submitted to the disk.
 */
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;
	daddr_t pbn;

	KDASSERT(((len >> wl->wl_log_dev_bshift) <<
	    wl->wl_log_dev_bshift) == len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
#ifdef _KERNEL
		pbn = btodb(pbn << wl->wl_log_dev_bshift);
#endif
		error = wapbl_buffered_write(data, slen, wl, pbn,
		    WAPBL_JDATA_FLAGS(wl));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
#ifdef _KERNEL
	pbn = btodb(pbn << wl->wl_log_dev_bshift);
#endif
	error = wapbl_buffered_write(data, len, wl, pbn,
	    WAPBL_JDATA_FLAGS(wl));
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}
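
/*
 * Worked example of the wrap-around above (illustrative numbers only,
 * not from this file): with wl_circ_off = 1024 and wl_circ_size =
 * 8192, the circular area spans byte offsets [1024, 9216).  Writing
 * len = 2048 bytes at *offp = 8704 leaves slen = 1024 + 8192 - 8704 =
 * 512 bytes before the end, so the first 512 bytes are written at
 * offset 8704 and the remaining 1536 bytes continue at offset 1024,
 * leaving *offp = 2560 on return.
 */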

/****************************************************************/
/*
 * WAPBL transactions: entering, adding/removing bufs, and exiting
 */

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;
	uint32_t maxphys;

	KDASSERT(wl);

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	maxphys = wl->wl_mount->mnt_maxphys;
	doflush = ((wl->wl_bufbytes + (lockcount * maxphys)) >
	    wl->wl_bufbytes_max / 2) ||
	    ((wl->wl_bufcount + (lockcount * 10)) >
	    wl->wl_bufcount_max / 2) ||
	    (wapbl_transaction_len(wl) > wl->wl_circ_size / 2) ||
	    (wl->wl_dealloccnt >= (wl->wl_dealloclim / 2));
	mutex_exit(&wl->wl_mtx);

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu) "
		    "dealloccnt %d (lim=%d)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max,
		    wl->wl_dealloccnt, wl->wl_dealloclim));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, RW_READER);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	/*
	 * XXX this could be handled more gracefully, perhaps place
	 * only a partial transaction in the log and allow the
	 * remaining to flush without the protection of the journal.
	 */
	KASSERTMSG((wapbl_transaction_len(wl) <=
		(wl->wl_circ_size - wl->wl_reserved_bytes)),
	    "wapbl_end: current transaction too big to flush");

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}

void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		TAILQ_REMOVE(&wl->wl_bufs, bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked by dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	TAILQ_INSERT_TAIL(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	    ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	TAILQ_REMOVE(&wl->wl_bufs, bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */

/****************************************************************/
/* Some utility inlines */

/*
 * wapbl_space_used(avail, head, tail)
 *
 *	Number of bytes used in a circular queue of avail total bytes,
 *	from tail to head.
 */
static inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail == 0) {
		KASSERT(head == 0);
		return 0;
	}
	return ((head + (avail - 1) - tail) % avail) + 1;
}
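
/*
 * Worked example (illustrative numbers only): with avail = 8192,
 * head = 5120 and tail = 2048,
 *	wapbl_space_used(8192, 5120, 2048)
 *	    = ((5120 + 8191 - 2048) % 8192) + 1 = 3072,
 * i.e. the 3072 bytes from tail up to head are in use.  When
 * head == tail (and both are nonzero), the same formula yields avail,
 * matching the "head == tail != 0 means log is full" convention.
 */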

#ifdef _KERNEL
/*
 * wapbl_advance(size, off, oldoff, delta)
 *
 *	Given a byte offset oldoff into a circular queue of size bytes
 *	starting at off, return a new byte offset oldoff + delta into
 *	the circular queue.
 */
static inline off_t
wapbl_advance(size_t size, size_t off, off_t oldoff, size_t delta)
{
	off_t newoff;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= (size_t)size);
	KASSERT((oldoff == 0) || ((size_t)oldoff >= off));
	KASSERT(oldoff < (off_t)(size + off));

	if ((oldoff == 0) && (delta != 0))
		newoff = off + delta;
	else if ((oldoff + delta) < (size + off))
		newoff = oldoff + delta;
	else
		newoff = (oldoff + delta) - size;

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (newoff == oldoff));
	KASSERT((delta == 0) || (newoff != 0));
	KASSERT((delta != (size)) || (newoff == oldoff));

	/* Define acceptable ranges for output. */
	KASSERT((newoff == 0) || ((size_t)newoff >= off));
	KASSERT((size_t)newoff < (size + off));
	return newoff;
}
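
/*
 * Worked example (illustrative numbers only): with size = 8192 and
 * off = 1024, valid offsets live in [1024, 9216).  Advancing
 * oldoff = 8704 by delta = 2048 would land at 10752 >= 9216, so the
 * result wraps to 10752 - 8192 = 2560.  Advancing oldoff = 0 (the
 * empty state) by any nonzero delta starts at off, e.g.
 * wapbl_advance(8192, 1024, 0, 512) == 1536.
 */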

/*
 * wapbl_space_free(avail, head, tail)
 *
 *	Number of bytes free in a circular queue of avail total bytes,
 *	in which everything from tail to head is used.
 */
static inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{

	return avail - wapbl_space_used(avail, head, tail);
}

/*
 * wapbl_advance_head(size, off, delta, headp, tailp)
 *
 *	In a circular queue of size bytes starting at off, given the
 *	old head and tail offsets *headp and *tailp, store the new head
 *	and tail offsets in *headp and *tailp resulting from adding
 *	delta bytes of data to the head.
 */
static inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_free(size, head, tail));
	head = wapbl_advance(size, off, head, delta);
	if ((tail == 0) && (head != 0))
		tail = off;
	*headp = head;
	*tailp = tail;
}

/*
 * wapbl_advance_tail(size, off, delta, headp, tailp)
 *
 *	In a circular queue of size bytes starting at off, given the
 *	old head and tail offsets *headp and *tailp, store the new head
 *	and tail offsets in *headp and *tailp resulting from removing
 *	delta bytes of data from the tail.
 */
static inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_used(size, head, tail));
	tail = wapbl_advance(size, off, tail, delta);
	if (head == tail) {
		head = tail = 0;
	}
	*headp = head;
	*tailp = tail;
}
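
/*
 * Worked example (illustrative numbers only): starting from an empty
 * log (head = tail = 0) with size = 8192 and off = 1024, appending
 * 4096 bytes via wapbl_advance_head leaves head = 5120 and pulls tail
 * up to off, i.e. tail = 1024.  Removing those same 4096 bytes via
 * wapbl_advance_tail then advances tail to 5120 == head, which resets
 * both to 0 -- the empty state again.
 */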


/****************************************************************/

/*
 * wapbl_truncate(wl, minfree)
 *
 *	Wait until at least minfree bytes are available in the log.
 *
 *	If it was necessary to wait for writes to complete,
 *	advance the circular queue tail to reflect the new write
 *	completions and issue a write commit to the log.
 *
 *	=> Caller must hold wl->wl_rwlock writer lock.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
		wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush.  This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}

/****************************************************************/

void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;
#ifdef WAPBL_DEBUG_BUFBYTES
	const int bufsize = bp->b_bufsize;
#endif

	/*
	 * Handle possible flushing of buffers after log has been
	 * decommissioned.
	 */
	if (!wl) {
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bufsize);
		we->we_unsynced_bufbytes -= bufsize;
#endif

		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			pool_put(&wapbl_entry_pool, we);
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_oflags & BO_DONE);
	KDASSERT(!(bp->b_oflags & BO_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_cflags & BC_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_cflags & BC_INVAL));
	KDASSERT(!(bp->b_cflags & BC_NOCACHE));
#endif

	if (bp->b_error) {
		/*
		 * If an error occurs, it would be nice to leave the buffer
		 * as a delayed write on the LRU queue so that we can retry
		 * it later.  But buffercache(9) can't handle dirty buffer
		 * reuse, so just mark the log permanently errored out.
		 */
		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
	}

	/*
	 * Make sure that the buf doesn't retain the media flags, so that
	 * e.g. wapbl_allow_dpofua has immediate effect on any following I/O.
	 * The flags will be set again if needed by another I/O.
	 */
	bp->b_flags &= ~B_MEDIA_FLAGS;

	/*
	 * Release the buffer here. wapbl_flush() may wait for the
	 * log to become empty and we better unbusy the buffer before
	 * wapbl_flush() returns.
	 */
	brelse(bp, 0);

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bufsize);
	we->we_unsynced_bufbytes -= bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bufsize);
	wl->wl_unsynced_bufbytes -= bufsize;
#endif
	wl->wl_ev_metawrite.ev_count++;

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions.  If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has been successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		       (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			pool_put(&wapbl_entry_pool, we);
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
}

/*
 * wapbl_flush(wl, wait)
 *
 *	Flush pending block writes, deallocations, and inodes from
 *	the current transaction in memory to the log on disk:
 *
 *	1. Call the file system's wl_flush callback to flush any
 *	   per-file-system pending updates.
 *	2. Wait for enough space in the log for the current transaction.
 *	3. Synchronously write the new log records, advancing the
 *	   circular queue head.
 *	4. Issue the pending block writes asynchronously, now that they
 *	   are recorded in the log and can be replayed after crash.
 *	5. If wait is true, wait for all writes to complete and for the
 *	   log to become empty.
 *
 *	On failure, call the file system's wl_flush_abort callback.
 */
int
wapbl_flush(struct wapbl *wl, int waitfor)
{
	struct buf *bp;
	struct wapbl_entry *we;
	off_t off;
	off_t head;
	off_t tail;
	size_t delta = 0;
	size_t flushsize;
	size_t reserved;
	int error = 0;

	/*
	 * Do a quick check to see if a full flush can be skipped.
	 * This assumes that the flush callback does not need to be called
	 * unless there are other outstanding bufs.
	 */
	if (!waitfor) {
		size_t nbufs;
		mutex_enter(&wl->wl_mtx);	/* XXX need mutex here to
						   protect the KASSERTS */
		nbufs = wl->wl_bufcount;
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
		mutex_exit(&wl->wl_mtx);
		if (nbufs == 0)
			return 0;
	}

	/*
	 * XXX we may consider using LK_UPGRADE here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));

	/*
	 * Now that we are exclusively locked and the file system has
	 * issued any deferred block writes for this transaction, check
	 * whether there are any blocks to write to the log.  If not,
	 * skip waiting for space or writing any log entries.
	 *
	 * XXX Shouldn't this also check wl_dealloccnt and
	 * wl_inohashcnt?  Perhaps wl_dealloccnt doesn't matter if the
	 * file system didn't produce any blocks as a consequence of
	 * it, but the same does not seem to be so of wl_inohashcnt.
	 */
	if (wl->wl_bufcount == 0) {
		goto wait_out;
	}

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d flushing entries with "
	    "bufcount=%zu bufbytes=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes));
#endif

	/* Calculate amount of space needed to flush */
	flushsize = wapbl_transaction_len(wl);
	if (wapbl_verbose_commit) {
		struct timespec ts;
		getnanotime(&ts);
		printf("%s: %lld.%09ld this transaction = %zu bytes\n",
		    __func__, (long long)ts.tv_sec,
		    (long)ts.tv_nsec, flushsize);
	}

	if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
		/*
		 * XXX this could be handled more gracefully, perhaps place
		 * only a partial transaction in the log and allow the
		 * remaining to flush without the protection of the journal.
		 */
		panic("wapbl_flush: current transaction too big to flush");
	}

	error = wapbl_truncate(wl, flushsize);
	if (error)
		goto out;

	off = wl->wl_head;
	KASSERT((off == 0) || (off >= wl->wl_circ_off));
	KASSERT((off == 0) || (off < wl->wl_circ_off + wl->wl_circ_size));
	error = wapbl_write_blocks(wl, &off);
	if (error)
		goto out;
	error = wapbl_write_revocations(wl, &off);
	if (error)
		goto out;
	error = wapbl_write_inodes(wl, &off);
	if (error)
		goto out;

	reserved = 0;
	if (wl->wl_inohashcnt)
		reserved = wapbl_transaction_inodes_len(wl);

	head = wl->wl_head;
	tail = wl->wl_tail;

	wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
	    &head, &tail);

	KASSERTMSG(head == off,
	    "lost head! head=%"PRIdMAX" tail=%" PRIdMAX
	    " off=%"PRIdMAX" flush=%zu",
	    (intmax_t)head, (intmax_t)tail, (intmax_t)off,
	    flushsize);

	/* Opportunistically move the tail forward if we can */
	mutex_enter(&wl->wl_mtx);
	delta = wl->wl_reclaimable_bytes;
	mutex_exit(&wl->wl_mtx);
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
	    &head, &tail);

	error = wapbl_write_commit(wl, head, tail);
	if (error)
		goto out;

	we = pool_get(&wapbl_entry_pool, PR_WAITOK);

#ifdef WAPBL_DEBUG_BUFBYTES
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
	    " unsynced=%zu"
	    "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
	    "inodes=%d\n",
	    curproc->p_pid, curlwp->l_lid, flushsize, delta,
	    wapbl_space_used(wl->wl_circ_size, head, tail),
	    wl->wl_unsynced_bufbytes, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
	    wl->wl_inohashcnt));
#else
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
	    "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
	    "inodes=%d\n",
	    curproc->p_pid, curlwp->l_lid, flushsize, delta,
	    wapbl_space_used(wl->wl_circ_size, head, tail),
	    wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
	    wl->wl_dealloccnt, wl->wl_inohashcnt));
#endif


	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);

	wl->wl_reserved_bytes = reserved;
	wl->wl_head = head;
	wl->wl_tail = tail;
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	KDASSERT(wl->wl_dealloccnt == 0);
#ifdef WAPBL_DEBUG_BUFBYTES
	wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
#endif

	we->we_wapbl = wl;
	we->we_bufcount = wl->wl_bufcount;
#ifdef WAPBL_DEBUG_BUFBYTES
	we->we_unsynced_bufbytes = wl->wl_bufbytes;
#endif
	we->we_reclaimable_bytes = flushsize;
	we->we_error = 0;
	SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);

1921 /*
1922 * This flushes bufs in the order in which they were queued, so
1923 * LRU order is preserved.
1924 */
1925 while ((bp = TAILQ_FIRST(&wl->wl_bufs)) != NULL) {
1926 if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1927 continue;
1928 }
1929 bp->b_iodone = wapbl_biodone;
1930 bp->b_private = we;
1931
1932 bremfree(bp);
1933 wapbl_remove_buf_locked(wl, bp);
1934 mutex_exit(&wl->wl_mtx);
1935 mutex_exit(&bufcache_lock);
1936 bawrite(bp);
1937 mutex_enter(&bufcache_lock);
1938 mutex_enter(&wl->wl_mtx);
1939 }
1940 mutex_exit(&wl->wl_mtx);
1941 mutex_exit(&bufcache_lock);
1942
1943 #if 0
1944 WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1945 ("wapbl_flush thread %d.%d done flushing entries...\n",
1946 curproc->p_pid, curlwp->l_lid));
1947 #endif
1948
1949 wait_out:
1950
1951 /*
1952 * If the waitfor flag is set, don't return until everything is
1953 * fully flushed and the on disk log is empty.
1954 */
1955 if (waitfor) {
1956 error = wapbl_truncate(wl, wl->wl_circ_size -
1957 wl->wl_reserved_bytes);
1958 }
1959
1960 out:
1961 if (error) {
1962 wl->wl_flush_abort(wl->wl_mount,
1963 TAILQ_FIRST(&wl->wl_dealloclist));
1964 }
1965
1966 #ifdef WAPBL_DEBUG_PRINT
1967 if (error) {
1968 pid_t pid = -1;
1969 lwpid_t lid = -1;
1970 if (curproc)
1971 pid = curproc->p_pid;
1972 if (curlwp)
1973 lid = curlwp->l_lid;
1974 mutex_enter(&wl->wl_mtx);
1975 #ifdef WAPBL_DEBUG_BUFBYTES
1976 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1977 ("wapbl_flush: thread %d.%d aborted flush: "
1978 "error = %d\n"
1979 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1980 "deallocs=%d inodes=%d\n"
1981 "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1982 "unsynced=%zu\n",
1983 pid, lid, error, wl->wl_bufcount,
1984 wl->wl_bufbytes, wl->wl_bcount,
1985 wl->wl_dealloccnt, wl->wl_inohashcnt,
1986 wl->wl_error_count, wl->wl_reclaimable_bytes,
1987 wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1988 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1989 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1990 ("\tentry: bufcount = %zu, reclaimable = %zu, "
1991 "error = %d, unsynced = %zu\n",
1992 we->we_bufcount, we->we_reclaimable_bytes,
1993 we->we_error, we->we_unsynced_bufbytes));
1994 }
1995 #else
1996 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1997 ("wapbl_flush: thread %d.%d aborted flush: "
1998 "error = %d\n"
1999 "\tbufcount=%zu bufbytes=%zu bcount=%zu "
2000 "deallocs=%d inodes=%d\n"
2001 "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
2002 pid, lid, error, wl->wl_bufcount,
2003 wl->wl_bufbytes, wl->wl_bcount,
2004 wl->wl_dealloccnt, wl->wl_inohashcnt,
2005 wl->wl_error_count, wl->wl_reclaimable_bytes,
2006 wl->wl_reserved_bytes));
2007 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
2008 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2009 ("\tentry: bufcount = %zu, reclaimable = %zu, "
2010 "error = %d\n", we->we_bufcount,
2011 we->we_reclaimable_bytes, we->we_error));
2012 }
2013 #endif
2014 mutex_exit(&wl->wl_mtx);
2015 }
2016 #endif
2017
2018 rw_exit(&wl->wl_rwlock);
2019 return error;
2020 }
2021
2022 /****************************************************************/
2023
2024 void
2025 wapbl_jlock_assert(struct wapbl *wl)
2026 {
2027
2028 KASSERT(rw_lock_held(&wl->wl_rwlock));
2029 }
2030
2031 void
2032 wapbl_junlock_assert(struct wapbl *wl)
2033 {
2034
2035 KASSERT(!rw_write_held(&wl->wl_rwlock));
2036 }
2037
2038 /****************************************************************/
2039
2040 /* locks missing */
2041 void
2042 wapbl_print(struct wapbl *wl,
2043 int full,
2044 void (*pr)(const char *, ...))
2045 {
2046 struct buf *bp;
2047 struct wapbl_entry *we;
2048 (*pr)("wapbl %p", wl);
2049 (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
2050 wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
2051 (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
2052 wl->wl_circ_size, wl->wl_circ_off,
2053 (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
2054 (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
2055 wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
2056 #ifdef WAPBL_DEBUG_BUFBYTES
2057 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
2058 "reserved = %zu errcnt = %d unsynced = %zu\n",
2059 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
2060 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
2061 wl->wl_error_count, wl->wl_unsynced_bufbytes);
2062 #else
2063 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
2064 "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
2065 wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
2066 wl->wl_error_count);
2067 #endif
2068 (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
2069 wl->wl_dealloccnt, wl->wl_dealloclim);
2070 (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
2071 wl->wl_inohashcnt, wl->wl_inohashmask);
2072 (*pr)("entries:\n");
2073 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
2074 #ifdef WAPBL_DEBUG_BUFBYTES
2075 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
2076 "unsynced = %zu\n",
2077 we->we_bufcount, we->we_reclaimable_bytes,
2078 we->we_error, we->we_unsynced_bufbytes);
2079 #else
2080 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
2081 we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
2082 #endif
2083 }
2084 if (full) {
2085 int cnt = 0;
2086 (*pr)("bufs =");
2087 TAILQ_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
2088 if (!TAILQ_NEXT(bp, b_wapbllist)) {
2089 (*pr)(" %p", bp);
2090 } else if ((++cnt % 6) == 0) {
2091 (*pr)(" %p,\n\t", bp);
2092 } else {
2093 (*pr)(" %p,", bp);
2094 }
2095 }
2096 (*pr)("\n");
2097
2098 (*pr)("dealloced blks = ");
2099 {
2100 struct wapbl_dealloc *wd;
2101 cnt = 0;
2102 TAILQ_FOREACH(wd, &wl->wl_dealloclist, wd_entries) {
2103 (*pr)(" %"PRId64":%d,",
2104 wd->wd_blkno,
2105 wd->wd_len);
2106 if ((++cnt % 4) == 0) {
2107 (*pr)("\n\t");
2108 }
2109 }
2110 }
2111 (*pr)("\n");
2112
2113 (*pr)("registered inodes = ");
2114 {
2115 int i;
2116 cnt = 0;
2117 for (i = 0; i <= wl->wl_inohashmask; i++) {
2118 struct wapbl_ino_head *wih;
2119 struct wapbl_ino *wi;
2120
2121 wih = &wl->wl_inohash[i];
2122 LIST_FOREACH(wi, wih, wi_hash) {
2123 if (wi->wi_ino == 0)
2124 continue;
2125 (*pr)(" %"PRIu64"/0%06"PRIo32",",
2126 wi->wi_ino, wi->wi_mode);
2127 if ((++cnt % 4) == 0) {
2128 (*pr)("\n\t");
2129 }
2130 }
2131 }
2132 (*pr)("\n");
2133 }
2134
2135 (*pr)("iobufs free =");
2136 TAILQ_FOREACH(bp, &wl->wl_iobufs, b_wapbllist) {
2137 if (!TAILQ_NEXT(bp, b_wapbllist)) {
2138 (*pr)(" %p", bp);
2139 } else if ((++cnt % 6) == 0) {
2140 (*pr)(" %p,\n\t", bp);
2141 } else {
2142 (*pr)(" %p,", bp);
2143 }
2144 }
2145 (*pr)("\n");
2146
2147 (*pr)("iobufs busy =");
2148 TAILQ_FOREACH(bp, &wl->wl_iobufs_busy, b_wapbllist) {
2149 if (!TAILQ_NEXT(bp, b_wapbllist)) {
2150 (*pr)(" %p", bp);
2151 } else if ((++cnt % 6) == 0) {
2152 (*pr)(" %p,\n\t", bp);
2153 } else {
2154 (*pr)(" %p,", bp);
2155 }
2156 }
2157 (*pr)("\n");
2158 }
2159 }
2160
2161 #if defined(WAPBL_DEBUG) || defined(DDB)
2162 void
2163 wapbl_dump(struct wapbl *wl)
2164 {
2165 #if defined(WAPBL_DEBUG)
2166 if (!wl)
2167 wl = wapbl_debug_wl;
2168 #endif
2169 if (!wl)
2170 return;
2171 wapbl_print(wl, 1, printf);
2172 }
2173 #endif
2174
2175 /****************************************************************/
2176
2177 int
2178 wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len, bool force,
2179 void **cookiep)
2180 {
2181 struct wapbl_dealloc *wd;
2182 int error = 0;
2183
2184 wapbl_jlock_assert(wl);
2185
2186 mutex_enter(&wl->wl_mtx);
2187
2188 if (__predict_false(wl->wl_dealloccnt >= wl->wl_dealloclim)) {
2189 if (!force) {
2190 error = EAGAIN;
2191 goto out;
2192 }
2193
2194 /*
2195 * Forced registration can only be used when:
2196 * 1) the caller can't cope with failure
2197 * 2) the path can be triggered only a bounded, small
2198 * number of times per transaction
2199 * If this is not fulfilled, and the path would be triggered
2200 * many times, this could overflow the maximum transaction size
2201 * and panic later.
2202 */
2203 printf("%s: forced dealloc registration over limit: %d >= %d\n",
2204 wl->wl_mount->mnt_stat.f_mntonname,
2205 wl->wl_dealloccnt, wl->wl_dealloclim);
2206 }
2207
2208 wl->wl_dealloccnt++;
2209 mutex_exit(&wl->wl_mtx);
2210
2211 wd = pool_get(&wapbl_dealloc_pool, PR_WAITOK);
2212 wd->wd_blkno = blk;
2213 wd->wd_len = len;
2214
2215 mutex_enter(&wl->wl_mtx);
2216 TAILQ_INSERT_TAIL(&wl->wl_dealloclist, wd, wd_entries);
2217
2218 if (cookiep)
2219 *cookiep = wd;
2220
2221 out:
2222 mutex_exit(&wl->wl_mtx);
2223
2224 WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
2225 ("wapbl_register_deallocation: blk=%"PRId64" len=%d error=%d\n",
2226 blk, len, error));
2227
2228 return error;
2229 }
2230
2231 static void
2232 wapbl_deallocation_free(struct wapbl *wl, struct wapbl_dealloc *wd,
2233 bool locked)
2234 {
2235 KASSERT(!locked
2236 || rw_lock_held(&wl->wl_rwlock) || mutex_owned(&wl->wl_mtx));
2237
2238 if (!locked)
2239 mutex_enter(&wl->wl_mtx);
2240
2241 TAILQ_REMOVE(&wl->wl_dealloclist, wd, wd_entries);
2242 wl->wl_dealloccnt--;
2243
2244 if (!locked)
2245 mutex_exit(&wl->wl_mtx);
2246
2247 pool_put(&wapbl_dealloc_pool, wd);
2248 }
2249
2250 void
2251 wapbl_unregister_deallocation(struct wapbl *wl, void *cookie)
2252 {
2253 KASSERT(cookie != NULL);
2254 wapbl_deallocation_free(wl, cookie, false);
2255 }
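
#if 0
/*
 * Editor's usage sketch, not part of the original source: a
 * hypothetical file-system caller registering a deallocation and
 * withdrawing it again if the surrounding operation is rolled back.
 * "blkno", "size" and "rolled_back" are illustrative names only.
 */
static int
example_dealloc(struct wapbl *wl, daddr_t blkno, int size, bool rolled_back)
{
	void *cookie;
	int error;

	error = wapbl_register_deallocation(wl, blkno, size, false, &cookie);
	if (error == EAGAIN) {
		/* too many pending revocations; flush the log and retry */
		return error;
	}
	if (error == 0 && rolled_back)
		wapbl_unregister_deallocation(wl, cookie);
	return error;
}
#endif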
2256
2257 /****************************************************************/
2258
2259 static void
2260 wapbl_inodetrk_init(struct wapbl *wl, u_int size)
2261 {
2262
2263 wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
2264 if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
2265 pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
2266 "wapblinopl", &pool_allocator_nointr, IPL_NONE);
2267 }
2268 }
2269
2270 static void
2271 wapbl_inodetrk_free(struct wapbl *wl)
2272 {
2273
2274 /* XXX this KASSERT needs locking/mutex analysis */
2275 KASSERT(wl->wl_inohashcnt == 0);
2276 hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
2277 if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
2278 pool_destroy(&wapbl_ino_pool);
2279 }
2280 }
2281
2282 static struct wapbl_ino *
2283 wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
2284 {
2285 struct wapbl_ino_head *wih;
2286 struct wapbl_ino *wi;
2287
2288 KASSERT(mutex_owned(&wl->wl_mtx));
2289
2290 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2291 LIST_FOREACH(wi, wih, wi_hash) {
2292 if (ino == wi->wi_ino)
2293 return wi;
2294 }
2295 return NULL;
2296 }
2297
2298 void
2299 wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2300 {
2301 struct wapbl_ino_head *wih;
2302 struct wapbl_ino *wi;
2303
2304 wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
2305
2306 mutex_enter(&wl->wl_mtx);
2307 if (wapbl_inodetrk_get(wl, ino) == NULL) {
2308 wi->wi_ino = ino;
2309 wi->wi_mode = mode;
2310 wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2311 LIST_INSERT_HEAD(wih, wi, wi_hash);
2312 wl->wl_inohashcnt++;
2313 WAPBL_PRINTF(WAPBL_PRINT_INODE,
2314 ("wapbl_register_inode: ino=%"PRId64"\n", ino));
2315 mutex_exit(&wl->wl_mtx);
2316 } else {
2317 mutex_exit(&wl->wl_mtx);
2318 pool_put(&wapbl_ino_pool, wi);
2319 }
2320 }
2321
2322 void
2323 wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2324 {
2325 struct wapbl_ino *wi;
2326
2327 mutex_enter(&wl->wl_mtx);
2328 wi = wapbl_inodetrk_get(wl, ino);
2329 if (wi) {
2330 WAPBL_PRINTF(WAPBL_PRINT_INODE,
2331 ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
2332 KASSERT(wl->wl_inohashcnt > 0);
2333 wl->wl_inohashcnt--;
2334 LIST_REMOVE(wi, wi_hash);
2335 mutex_exit(&wl->wl_mtx);
2336
2337 pool_put(&wapbl_ino_pool, wi);
2338 } else {
2339 mutex_exit(&wl->wl_mtx);
2340 }
2341 }
2342
2343 /****************************************************************/
2344
2345 /*
2346 * wapbl_transaction_inodes_len(wl)
2347 *
2348 * Calculate the number of bytes required for inode registration
2349 * log records in wl.
2350 */
2351 static inline size_t
2352 wapbl_transaction_inodes_len(struct wapbl *wl)
2353 {
2354 int blocklen = 1<<wl->wl_log_dev_bshift;
2355 int iph;
2356
2357 /* Calculate the number of inodes described in an inodelist header */
2358 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2359 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2360
2361 KASSERT(iph > 0);
2362
2363 return MAX(1, howmany(wl->wl_inohashcnt, iph)) * blocklen;
2364 }
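
/*
 * Editor's worked example (illustrative; the header and entry sizes
 * below are assumed, not taken from the source): with a 512-byte log
 * device block, a 16-byte wapbl_wc_inodelist header and 8-byte inode
 * entries, iph = (512 - 16) / 8 = 62, so 100 registered inodes need
 * howmany(100, 62) = 2 blocks, i.e. 1024 bytes.
 */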
2365
2366
2367 /*
2368 * wapbl_transaction_len(wl)
2369 *
2370 * Calculate number of bytes required for all log records in wl.
2371 */
2372 static size_t
2373 wapbl_transaction_len(struct wapbl *wl)
2374 {
2375 int blocklen = 1<<wl->wl_log_dev_bshift;
2376 size_t len;
2377
2378 /* Buffer data, plus blocklist headers for buffers and deallocations */
2379 len = wl->wl_bcount;
2380 len += howmany(wl->wl_bufcount, wl->wl_brperjblock) * blocklen;
2381 len += howmany(wl->wl_dealloccnt, wl->wl_brperjblock) * blocklen;
2382 len += wapbl_transaction_inodes_len(wl);
2383
2384 return len;
2385 }
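
/*
 * Editor's worked example (assumed numbers, purely illustrative): with
 * blocklen = 512, wl_brperjblock = 62, 100 buffers totalling
 * wl_bcount = 51200 bytes, 10 pending deallocations and no registered
 * inodes:
 *
 *     51200                             buffer data
 *   + howmany(100, 62) * 512 =  1024    blocklist header blocks
 *   + howmany(10, 62)  * 512 =   512    revocation header blocks
 *   + MAX(1, 0)        * 512 =   512    (empty) inode list block
 *   ------------------------------------
 *     53248 bytes total
 */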
2386
2387 /*
2388 * wapbl_cache_sync(wl, msg)
2389 *
2390 * Issue DIOCCACHESYNC to wl->wl_devvp.
2391 *
2392 * If sysctl(vfs.wapbl.verbose_commit) >= 2, print a message
2393 * including msg about the duration of the cache sync.
2394 */
2395 static int
2396 wapbl_cache_sync(struct wapbl *wl, const char *msg)
2397 {
2398 const bool verbose = wapbl_verbose_commit >= 2;
2399 struct bintime start_time;
2400 int force = 1;
2401 int error;
2402
2403 /* Skip full cache sync if disabled */
2404 if (!wapbl_flush_disk_cache) {
2405 return 0;
2406 }
2407 if (verbose) {
2408 bintime(&start_time);
2409 }
2410 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force,
2411 FWRITE, FSCRED);
2412 if (error) {
2413 WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2414 ("wapbl_cache_sync: DIOCCACHESYNC on dev 0x%jx "
2415 "returned %d\n", (uintmax_t)wl->wl_devvp->v_rdev, error));
2416 }
2417 if (verbose) {
2418 struct bintime d;
2419 struct timespec ts;
2420
2421 bintime(&d);
2422 bintime_sub(&d, &start_time);
2423 bintime2timespec(&d, &ts);
2424 printf("wapbl_cache_sync: %s: dev 0x%jx %ju.%09lu\n",
2425 msg, (uintmax_t)wl->wl_devvp->v_rdev,
2426 (uintmax_t)ts.tv_sec, ts.tv_nsec);
2427 }
2428
2429 wl->wl_ev_cacheflush.ev_count++;
2430
2431 return error;
2432 }
2433
2434 /*
2435 * wapbl_write_commit(wl, head, tail)
2436 *
2437 * Issue a disk cache sync to wait for all pending writes to the
2438 * log to complete, and then synchronously commit the current
2439 * circular queue head and tail to the log, in the next of two
2440 * locations for commit headers on disk.
2441 *
2442 * Increment the generation number. If the generation number
2443 * rolls over to zero, then a subsequent commit would appear to
2444 * have an older generation than this one -- in that case, issue a
2445 * duplicate commit to avoid this.
2446 *
2447 * => Caller must have exclusive access to wl, either by holding
2448 * wl->wl_rwlock for writer or by being in wapbl_start before anyone
2449 * else has seen wl.
2450 */
2451 static int
2452 wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
2453 {
2454 struct wapbl_wc_header *wc = wl->wl_wc_header;
2455 struct timespec ts;
2456 int error;
2457 daddr_t pbn;
2458
2459 error = wapbl_buffered_flush(wl, true);
2460 if (error)
2461 return error;
2462 /*
2463 * Flush disk cache to ensure that blocks we've written are actually
2464 * written to stable storage before the commit header.
2465 * This flushes to disk not only journal blocks, but also all
2466 * metadata blocks written asynchronously since the previous commit.
2467 *
2468 * XXX We should calculate a checksum here; instead we do this for now.
2469 */
2470 wapbl_cache_sync(wl, "1");
2471
2472 wc->wc_head = head;
2473 wc->wc_tail = tail;
2474 wc->wc_checksum = 0;
2475 wc->wc_version = 1;
2476 getnanotime(&ts);
2477 wc->wc_time = ts.tv_sec;
2478 wc->wc_timensec = ts.tv_nsec;
2479
2480 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2481 ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n",
2482 (intmax_t)head, (intmax_t)tail));
2483
2484 /*
2485 * write the commit header.
2486 *
2487 * XXX if generation will rollover, then first zero
2488 * over second commit header before trying to write both headers.
2489 */
2490
2491 pbn = wl->wl_logpbn + (wc->wc_generation % 2);
2492 #ifdef _KERNEL
2493 pbn = btodb(pbn << wc->wc_log_dev_bshift);
2494 #endif
2495 error = wapbl_buffered_write(wc, wc->wc_len, wl, pbn, WAPBL_JFLAGS(wl));
2496 if (error)
2497 return error;
2498 error = wapbl_buffered_flush(wl, true);
2499 if (error)
2500 return error;
2501
2502 /*
2503 * Flush disk cache to ensure that the commit header is actually
2504 * written before metadata blocks. The commit block is written using
2505 * FUA when enabled; in that case this flush is not needed.
2506 */
2507 if (!WAPBL_USE_FUA(wl))
2508 wapbl_cache_sync(wl, "2");
2509
2510 /*
2511 * If the generation number was zero, write it out a second time.
2512 * This handles initialization and generation number rollover.
2513 */
2514 if (wc->wc_generation++ == 0) {
2515 error = wapbl_write_commit(wl, head, tail);
2516 /*
2517 * This panic should be able to be removed if we do the
2518 * zero'ing mentioned above, and we are certain to roll
2519 * back generation number on failure.
2520 */
2521 if (error)
2522 panic("wapbl_write_commit: error writing duplicate "
2523 "log header: %d", error);
2524 }
2525
2526 wl->wl_ev_commit.ev_count++;
2527
2528 return 0;
2529 }
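
#if 0
/*
 * Editor's sketch of the alternating commit slot used above
 * (illustrative only, not part of the original source): generation g
 * is written at physical block wl_logpbn + (g % 2), so successive
 * commits ping-pong between the two header locations, and replay
 * picks whichever header carries the larger generation number.
 */
static daddr_t
example_commit_slot(daddr_t logpbn, uint32_t generation)
{

	return logpbn + (generation % 2);	/* 0,1,0,1,... */
}
#endif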
2530
2531 /*
2532 * wapbl_write_blocks(wl, offp)
2533 *
2534 * Write all pending physical blocks in the current transaction
2535 * from wapbl_add_buf to the log on disk, adding to the circular
2536 * queue head at byte offset *offp, and returning the new head's
2537 * byte offset in *offp.
2538 */
2539 static int
2540 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
2541 {
2542 struct wapbl_wc_blocklist *wc =
2543 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2544 int blocklen = 1<<wl->wl_log_dev_bshift;
2545 struct buf *bp;
2546 off_t off = *offp;
2547 int error;
2548 size_t padding;
2549
2550 KASSERT(rw_write_held(&wl->wl_rwlock));
2551
2552 bp = TAILQ_FIRST(&wl->wl_bufs);
2553
2554 while (bp) {
2555 int cnt;
2556 struct buf *obp = bp;
2557
2558 KASSERT(bp->b_flags & B_LOCKED);
2559
2560 wc->wc_type = WAPBL_WC_BLOCKS;
2561 wc->wc_len = blocklen;
2562 wc->wc_blkcount = 0;
2563 while (bp && (wc->wc_blkcount < wl->wl_brperjblock)) {
2564 /*
2565 * Make sure all the physical block numbers are up to
2566 * date. If this is not always true on a given
2567 * filesystem, then VOP_BMAP must be called. We
2568 * could call VOP_BMAP here, or else in the filesystem
2569 * specific flush callback, although neither of those
2570 * solutions allow us to take the vnode lock. If a
2571 * filesystem requires that we must take the vnode lock
2572 * to call VOP_BMAP, then we can probably do it in
2573 * bwrite when the vnode lock should already be held
2574 * by the invoking code.
2575 */
2576 KASSERT((bp->b_vp->v_type == VBLK) ||
2577 (bp->b_blkno != bp->b_lblkno));
2578 KASSERT(bp->b_blkno > 0);
2579
2580 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
2581 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
2582 wc->wc_len += bp->b_bcount;
2583 wc->wc_blkcount++;
2584 bp = TAILQ_NEXT(bp, b_wapbllist);
2585 }
2586 if (wc->wc_len % blocklen != 0) {
2587 padding = blocklen - wc->wc_len % blocklen;
2588 wc->wc_len += padding;
2589 } else {
2590 padding = 0;
2591 }
2592
2593 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2594 ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
2595 wc->wc_len, padding, (intmax_t)off));
2596
2597 error = wapbl_circ_write(wl, wc, blocklen, &off);
2598 if (error)
2599 return error;
2600 bp = obp;
2601 cnt = 0;
2602 while (bp && (cnt++ < wl->wl_brperjblock)) {
2603 error = wapbl_circ_write(wl, bp->b_data,
2604 bp->b_bcount, &off);
2605 if (error)
2606 return error;
2607 bp = TAILQ_NEXT(bp, b_wapbllist);
2608 }
2609 if (padding) {
2610 void *zero;
2611
2612 zero = wapbl_alloc(padding);
2613 memset(zero, 0, padding);
2614 error = wapbl_circ_write(wl, zero, padding, &off);
2615 wapbl_free(zero, padding);
2616 if (error)
2617 return error;
2618 }
2619 }
2620 *offp = off;
2621 return 0;
2622 }
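
/*
 * Editor's worked example of the padding above (assumed geometry):
 * with a 2048-byte log device block and three 512-byte buffers,
 * wc_len = 2048 + 3*512 = 3584, so padding = 2048 - (3584 % 2048)
 * = 512 and the record occupies 4096 bytes, an exact multiple of the
 * log block size.
 */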
2623
2624 /*
2625 * wapbl_write_revocations(wl, offp)
2626 *
2627 * Write all pending deallocations in the current transaction from
2628 * wapbl_register_deallocation to the log on disk, adding to the
2629 * circular queue's head at byte offset *offp, and returning the
2630 * new head's byte offset in *offp.
2631 */
2632 static int
2633 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
2634 {
2635 struct wapbl_wc_blocklist *wc =
2636 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2637 struct wapbl_dealloc *wd, *lwd;
2638 int blocklen = 1<<wl->wl_log_dev_bshift;
2639 off_t off = *offp;
2640 int error;
2641
2642 KASSERT(rw_write_held(&wl->wl_rwlock));
2643
2644 if (wl->wl_dealloccnt == 0)
2645 return 0;
2646
2647 while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2648 wc->wc_type = WAPBL_WC_REVOCATIONS;
2649 wc->wc_len = blocklen;
2650 wc->wc_blkcount = 0;
2651 while (wd && (wc->wc_blkcount < wl->wl_brperjblock)) {
2652 wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2653 wd->wd_blkno;
2654 wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2655 wd->wd_len;
2656 wc->wc_blkcount++;
2657
2658 wd = TAILQ_NEXT(wd, wd_entries);
2659 }
2660 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2661 ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2662 wc->wc_len, (intmax_t)off));
2663 error = wapbl_circ_write(wl, wc, blocklen, &off);
2664 if (error)
2665 return error;
2666
2667 /* free all successfully written deallocs */
2668 lwd = wd;
2669 while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2670 if (wd == lwd)
2671 break;
2672 wapbl_deallocation_free(wl, wd, true);
2673 }
2674 }
2675 *offp = off;
2676 return 0;
2677 }
2678
2679 /*
2680 * wapbl_write_inodes(wl, offp)
2681 *
2682 * Write all pending inode allocations in the current transaction
2683 * from wapbl_register_inode to the log on disk, adding to the
2684 * circular queue's head at byte offset *offp and returning the
2685 * new head's byte offset in *offp.
2686 */
2687 static int
2688 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2689 {
2690 struct wapbl_wc_inodelist *wc =
2691 (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2692 int i;
2693 int blocklen = 1 << wl->wl_log_dev_bshift;
2694 off_t off = *offp;
2695 int error;
2696
2697 struct wapbl_ino_head *wih;
2698 struct wapbl_ino *wi;
2699 int iph;
2700
2701 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2702 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2703
2704 i = 0;
2705 wih = &wl->wl_inohash[0];
2706 wi = NULL;
2707 do {
2708 wc->wc_type = WAPBL_WC_INODES;
2709 wc->wc_len = blocklen;
2710 wc->wc_inocnt = 0;
2711 wc->wc_clear = (i == 0);
2712 while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2713 while (!wi) {
2714 KASSERT((wih - &wl->wl_inohash[0])
2715 <= wl->wl_inohashmask);
2716 wi = LIST_FIRST(wih++);
2717 }
2718 wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2719 wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2720 wc->wc_inocnt++;
2721 i++;
2722 wi = LIST_NEXT(wi, wi_hash);
2723 }
2724 WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2725 ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2726 wc->wc_len, (intmax_t)off));
2727 error = wapbl_circ_write(wl, wc, blocklen, &off);
2728 if (error)
2729 return error;
2730 } while (i < wl->wl_inohashcnt);
2731
2732 *offp = off;
2733 return 0;
2734 }
2735
2736 #endif /* _KERNEL */
2737
2738 /****************************************************************/
2739
2740 struct wapbl_blk {
2741 LIST_ENTRY(wapbl_blk) wb_hash;
2742 daddr_t wb_blk;
2743 off_t wb_off; /* Offset of this block in the log */
2744 };
2745 #define WAPBL_BLKPOOL_MIN 83
2746
2747 static void
2748 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2749 {
2750 if (size < WAPBL_BLKPOOL_MIN)
2751 size = WAPBL_BLKPOOL_MIN;
2752 KASSERT(wr->wr_blkhash == 0);
2753 #ifdef _KERNEL
2754 wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2755 #else /* ! _KERNEL */
2756 /* Manually implement hashinit */
2757 {
2758 unsigned long i, hashsize;
2759 for (hashsize = 1; hashsize < size; hashsize <<= 1)
2760 continue;
2761 wr->wr_blkhash = wapbl_alloc(hashsize * sizeof(*wr->wr_blkhash));
2762 for (i = 0; i < hashsize; i++)
2763 LIST_INIT(&wr->wr_blkhash[i]);
2764 wr->wr_blkhashmask = hashsize - 1;
2765 }
2766 #endif /* ! _KERNEL */
2767 }
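
/*
 * Editor's note with a worked example (illustrative): the userland
 * branch rounds size up to a power of two, e.g. size = 100 yields
 * hashsize = 128 and wr_blkhashmask = 127, so "blk & wr_blkhashmask"
 * in wapbl_blkhash_get() is a cheap modulo.
 */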
2768
2769 static void
2770 wapbl_blkhash_free(struct wapbl_replay *wr)
2771 {
2772 KASSERT(wr->wr_blkhashcnt == 0);
2773 #ifdef _KERNEL
2774 hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2775 #else /* ! _KERNEL */
2776 wapbl_free(wr->wr_blkhash,
2777 (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
2778 #endif /* ! _KERNEL */
2779 }
2780
2781 static struct wapbl_blk *
2782 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2783 {
2784 struct wapbl_blk_head *wbh;
2785 struct wapbl_blk *wb;
2786 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2787 LIST_FOREACH(wb, wbh, wb_hash) {
2788 if (blk == wb->wb_blk)
2789 return wb;
2790 }
2791 return NULL;
2792 }
2793
2794 static void
2795 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2796 {
2797 struct wapbl_blk_head *wbh;
2798 struct wapbl_blk *wb;
2799 wb = wapbl_blkhash_get(wr, blk);
2800 if (wb) {
2801 KASSERT(wb->wb_blk == blk);
2802 wb->wb_off = off;
2803 } else {
2804 wb = wapbl_alloc(sizeof(*wb));
2805 wb->wb_blk = blk;
2806 wb->wb_off = off;
2807 wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2808 LIST_INSERT_HEAD(wbh, wb, wb_hash);
2809 wr->wr_blkhashcnt++;
2810 }
2811 }
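
#if 0
	/*
	 * Editor's illustration (not part of the original source): when
	 * the same block appears twice while scanning the log, the
	 * second insert only updates wb_off, so the hash ends up
	 * pointing at the newest journalled copy of each block.
	 */
	wapbl_blkhash_ins(wr, 100, 4096);	/* older copy at log offset 4096 */
	wapbl_blkhash_ins(wr, 100, 8192);	/* wb_off for block 100 is now 8192 */
#endif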
2812
2813 static void
2814 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2815 {
2816 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2817 if (wb) {
2818 KASSERT(wr->wr_blkhashcnt > 0);
2819 wr->wr_blkhashcnt--;
2820 LIST_REMOVE(wb, wb_hash);
2821 wapbl_free(wb, sizeof(*wb));
2822 }
2823 }
2824
2825 static void
2826 wapbl_blkhash_clear(struct wapbl_replay *wr)
2827 {
2828 unsigned long i;
2829 for (i = 0; i <= wr->wr_blkhashmask; i++) {
2830 struct wapbl_blk *wb;
2831
2832 while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2833 KASSERT(wr->wr_blkhashcnt > 0);
2834 wr->wr_blkhashcnt--;
2835 LIST_REMOVE(wb, wb_hash);
2836 wapbl_free(wb, sizeof(*wb));
2837 }
2838 }
2839 KASSERT(wr->wr_blkhashcnt == 0);
2840 }
2841
2842 /****************************************************************/
2843
2844 /*
2845 * wapbl_circ_read(wr, data, len, offp)
2846 *
2847 * Read len bytes into data from the circular queue of wr,
2848 * starting at the linear byte offset *offp, and returning the new
2849 * linear byte offset in *offp.
2850 *
2851 * If the starting linear byte offset precedes wr->wr_circ_off,
2852 * the read instead begins at wr->wr_circ_off. XXX WTF? This
2853 * should be a KASSERT, not a conditional.
2854 */
2855 static int
2856 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2857 {
2858 size_t slen;
2859 off_t off = *offp;
2860 int error;
2861 daddr_t pbn;
2862
2863 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2864 wr->wr_log_dev_bshift) == len);
2865
2866 if (off < wr->wr_circ_off)
2867 off = wr->wr_circ_off;
2868 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2869 if (slen < len) {
2870 pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2871 #ifdef _KERNEL
2872 pbn = btodb(pbn << wr->wr_log_dev_bshift);
2873 #endif
2874 error = wapbl_read(data, slen, wr->wr_devvp, pbn);
2875 if (error)
2876 return error;
2877 data = (uint8_t *)data + slen;
2878 len -= slen;
2879 off = wr->wr_circ_off;
2880 }
2881 pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2882 #ifdef _KERNEL
2883 pbn = btodb(pbn << wr->wr_log_dev_bshift);
2884 #endif
2885 error = wapbl_read(data, len, wr->wr_devvp, pbn);
2886 if (error)
2887 return error;
2888 off += len;
2889 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2890 off = wr->wr_circ_off;
2891 *offp = off;
2892 return 0;
2893 }
2894
2895 /*
2896 * wapbl_circ_advance(wr, len, offp)
2897 *
2898 * Compute the linear byte offset of the circular queue of wr that
2899 * is len bytes past *offp, and store it in *offp.
2900 *
2901 * This behaves like wapbl_circ_read, but without actually reading
2902 * anything.
2903 *
2904 * If the starting linear byte offset precedes wr->wr_circ_off, it
2905 * is taken to be wr->wr_circ_off instead. XXX WTF? This should
2906 * be a KASSERT, not a conditional.
2907 */
2908 static void
2909 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2910 {
2911 size_t slen;
2912 off_t off = *offp;
2913
2914 KASSERT(((len >> wr->wr_log_dev_bshift) <<
2915 wr->wr_log_dev_bshift) == len);
2916
2917 if (off < wr->wr_circ_off)
2918 off = wr->wr_circ_off;
2919 slen = wr->wr_circ_off + wr->wr_circ_size - off;
2920 if (slen < len) {
2921 len -= slen;
2922 off = wr->wr_circ_off;
2923 }
2924 off += len;
2925 if (off >= wr->wr_circ_off + wr->wr_circ_size)
2926 off = wr->wr_circ_off;
2927 *offp = off;
2928 }
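
/*
 * Editor's worked example (assumed geometry): with wr_circ_off = 1024
 * and wr_circ_size = 8192, valid offsets are [1024, 9216).  Advancing
 * len = 2048 from *offp = 8704 leaves slen = 9216 - 8704 = 512 < len,
 * so len becomes 1536, off rewinds to 1024, and the result is 2560.
 */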
2929
2930 /****************************************************************/
2931
2932 int
2933 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2934 daddr_t off, size_t count, size_t blksize)
2935 {
2936 struct wapbl_replay *wr;
2937 int error;
2938 struct vnode *devvp;
2939 daddr_t logpbn;
2940 uint8_t *scratch;
2941 struct wapbl_wc_header *wch;
2942 struct wapbl_wc_header *wch2;
2943 /* Use this until we read the actual log header */
2944 int log_dev_bshift = ilog2(blksize);
2945 size_t used;
2946 daddr_t pbn;
2947
2948 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2949 ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2950 vp, off, count, blksize));
2951
2952 if (off < 0)
2953 return EINVAL;
2954
2955 if (blksize < DEV_BSIZE)
2956 return EINVAL;
2957 if (blksize % DEV_BSIZE)
2958 return EINVAL;
2959
2960 #ifdef _KERNEL
2961 #if 0
2962 /* XXX vp->v_size isn't reliably set for VBLK devices,
2963 * especially root. However, we might still want to verify
2964 * that the full load is readable */
2965 if ((off + count) * blksize > vp->v_size)
2966 return EINVAL;
2967 #endif
2968 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2969 return error;
2970 }
2971 #else /* ! _KERNEL */
2972 devvp = vp;
2973 logpbn = off;
2974 #endif /* ! _KERNEL */
2975
2976 scratch = wapbl_alloc(MAXBSIZE);
2977
2978 pbn = logpbn;
2979 #ifdef _KERNEL
2980 pbn = btodb(pbn << log_dev_bshift);
2981 #endif
2982 error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, pbn);
2983 if (error)
2984 goto errout;
2985
2986 wch = (struct wapbl_wc_header *)scratch;
2987 wch2 =
2988 (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2989 /* XXX verify checksums and magic numbers */
2990 if (wch->wc_type != WAPBL_WC_HEADER) {
2991 printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2992 error = EFTYPE;
2993 goto errout;
2994 }
2995
2996 if (wch2->wc_generation > wch->wc_generation)
2997 wch = wch2;
2998
2999 wr = wapbl_calloc(1, sizeof(*wr));
3000
3001 wr->wr_logvp = vp;
3002 wr->wr_devvp = devvp;
3003 wr->wr_logpbn = logpbn;
3004
3005 wr->wr_scratch = scratch;
3006
3007 wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
3008 wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
3009 wr->wr_circ_off = wch->wc_circ_off;
3010 wr->wr_circ_size = wch->wc_circ_size;
3011 wr->wr_generation = wch->wc_generation;
3012
3013 used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
3014
3015 WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
3016 ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
3017 " len=%"PRId64" used=%zu\n",
3018 wch->wc_head, wch->wc_tail, wch->wc_circ_off,
3019 wch->wc_circ_size, used));
3020
3021 wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
3022
3023 error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
3024 if (error) {
3025 wapbl_replay_stop(wr);
3026 wapbl_replay_free(wr);
3027 return error;
3028 }
3029
3030 *wrp = wr;
3031 return 0;
3032
3033 errout:
3034 wapbl_free(scratch, MAXBSIZE);
3035 return error;
3036 }
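
#if 0
/*
 * Editor's usage sketch (hypothetical mount-time flow, not from the
 * source): replay is started against the log, the journalled blocks
 * are written back to the file system device, and the replay state is
 * then torn down.  "devvp", "fsdevvp", "logstart", "logsize" and
 * "blksize" are illustrative names.
 */
static int
example_replay(struct vnode *devvp, struct vnode *fsdevvp,
    daddr_t logstart, size_t logsize, size_t blksize)
{
	struct wapbl_replay *wr;
	int error;

	error = wapbl_replay_start(&wr, devvp, logstart, logsize, blksize);
	if (error)
		return error;
	error = wapbl_replay_write(wr, fsdevvp);
	wapbl_replay_stop(wr);
	wapbl_replay_free(wr);
	return error;
}
#endif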
3037
3038 void
3039 wapbl_replay_stop(struct wapbl_replay *wr)
3040 {
3041
3042 if (!wapbl_replay_isopen(wr))
3043 return;
3044
3045 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
3046
3047 wapbl_free(wr->wr_scratch, MAXBSIZE);
3048 wr->wr_scratch = NULL;
3049
3050 wr->wr_logvp = NULL;
3051
3052 wapbl_blkhash_clear(wr);
3053 wapbl_blkhash_free(wr);
3054 }
3055
3056 void
3057 wapbl_replay_free(struct wapbl_replay *wr)
3058 {
3059
3060 KDASSERT(!wapbl_replay_isopen(wr));
3061
3062 if (wr->wr_inodes)
3063 wapbl_free(wr->wr_inodes,
3064 wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
3065 wapbl_free(wr, sizeof(*wr));
3066 }
3067
3068 #ifdef _KERNEL
3069 int
3070 wapbl_replay_isopen1(struct wapbl_replay *wr)
3071 {
3072
3073 return wapbl_replay_isopen(wr);
3074 }
3075 #endif
3076
3077 /*
3078 * Calculate the disk address for the i'th block in the wc_blocklist,
3079 * offset by j blocks of size blen.
3080 *
3081 * wc_daddr is always a kernel disk address in DEV_BSIZE units that
3082 * was written to the journal.
3083 *
3084 * The kernel needs that address plus the offset in DEV_BSIZE units.
3085 *
3086 * Userland needs that address plus the offset in blen units.
3087 *
3088 */
3089 static daddr_t
3090 wapbl_block_daddr(struct wapbl_wc_blocklist *wc, int i, int j, int blen)
3091 {
3092 daddr_t pbn;
3093
3094 #ifdef _KERNEL
3095 pbn = wc->wc_blocks[i].wc_daddr + btodb(j * blen);
3096 #else
3097 pbn = dbtob(wc->wc_blocks[i].wc_daddr) / blen + j;
3098 #endif
3099
3100 return pbn;
3101 }
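
/*
 * Editor's worked example (assumed values, with DEV_BSIZE = 512): for
 * blen = 2048, wc_daddr = 1000 and j = 3, the kernel form yields
 * 1000 + btodb(3 * 2048) = 1000 + 12 = 1012 (DEV_BSIZE units), while
 * the userland form yields dbtob(1000) / 2048 + 3 = 250 + 3 = 253
 * (blen units).
 */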
3102
3103 static void
3104 wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
3105 {
3106 struct wapbl_wc_blocklist *wc =
3107 (struct wapbl_wc_blocklist *)wr->wr_scratch;
3108 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3109 int i, j, n;
3110
3111 for (i = 0; i < wc->wc_blkcount; i++) {
3112 /*
3113 * Enter each physical block into the hashtable independently.
3114 */
3115 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
3116 for (j = 0; j < n; j++) {
3117 wapbl_blkhash_ins(wr, wapbl_block_daddr(wc, i, j, fsblklen),
3118 *offp);
3119 wapbl_circ_advance(wr, fsblklen, offp);
3120 }
3121 }
3122 }
3123
3124 static void
3125 wapbl_replay_process_revocations(struct wapbl_replay *wr)
3126 {
3127 struct wapbl_wc_blocklist *wc =
3128 (struct wapbl_wc_blocklist *)wr->wr_scratch;
3129 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3130 int i, j, n;
3131
3132 for (i = 0; i < wc->wc_blkcount; i++) {
3133 /*
3134 * Remove any blocks found from the hashtable.
3135 */
3136 n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
3137 for (j = 0; j < n; j++)
3138 wapbl_blkhash_rem(wr, wapbl_block_daddr(wc, i, j, fsblklen));
3139 }
3140 }
3141
3142 static void
3143 wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
3144 {
3145 struct wapbl_wc_inodelist *wc =
3146 (struct wapbl_wc_inodelist *)wr->wr_scratch;
3147 void *new_inodes;
3148 const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
3149
3150 KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
3151
3152 /*
3153 * Keep track of where we found this so the location won't be
3154 * overwritten.
3155 */
3156 if (wc->wc_clear) {
3157 wr->wr_inodestail = oldoff;
3158 wr->wr_inodescnt = 0;
3159 if (wr->wr_inodes != NULL) {
3160 wapbl_free(wr->wr_inodes, oldsize);
3161 wr->wr_inodes = NULL;
3162 }
3163 }
3164 wr->wr_inodeshead = newoff;
3165 if (wc->wc_inocnt == 0)
3166 return;
3167
3168 new_inodes = wapbl_alloc((wr->wr_inodescnt + wc->wc_inocnt) *
3169 sizeof(wr->wr_inodes[0]));
3170 if (wr->wr_inodes != NULL) {
3171 memcpy(new_inodes, wr->wr_inodes, oldsize);
3172 wapbl_free(wr->wr_inodes, oldsize);
3173 }
3174 wr->wr_inodes = new_inodes;
3175 memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
3176 wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
3177 wr->wr_inodescnt += wc->wc_inocnt;
3178 }
3179
3180 static int
3181 wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
3182 {
3183 off_t off;
3184 int error;
3185
3186 int logblklen = 1 << wr->wr_log_dev_bshift;
3187
3188 wapbl_blkhash_clear(wr);
3189
3190 off = tail;
3191 while (off != head) {
3192 struct wapbl_wc_null *wcn;
3193 off_t saveoff = off;
3194 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3195 if (error)
3196 goto errout;
3197 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3198 switch (wcn->wc_type) {
3199 case WAPBL_WC_BLOCKS:
3200 wapbl_replay_process_blocks(wr, &off);
3201 break;
3202
3203 case WAPBL_WC_REVOCATIONS:
3204 wapbl_replay_process_revocations(wr);
3205 break;
3206
3207 case WAPBL_WC_INODES:
3208 wapbl_replay_process_inodes(wr, saveoff, off);
3209 break;
3210
3211 default:
3212 printf("Unrecognized wapbl type: 0x%08x\n",
3213 wcn->wc_type);
3214 error = EFTYPE;
3215 goto errout;
3216 }
3217 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3218 if (off != saveoff) {
3219 printf("wapbl_replay: corrupted records\n");
3220 error = EFTYPE;
3221 goto errout;
3222 }
3223 }
3224 return 0;
3225
3226 errout:
3227 wapbl_blkhash_clear(wr);
3228 return error;
3229 }
3230
3231 #if 0
3232 int
3233 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
3234 {
3235 off_t off;
3236 int mismatchcnt = 0;
3237 int logblklen = 1 << wr->wr_log_dev_bshift;
3238 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3239 void *scratch1 = wapbl_alloc(MAXBSIZE);
3240 void *scratch2 = wapbl_alloc(MAXBSIZE);
3241 int error = 0;
3242
3243 KDASSERT(wapbl_replay_isopen(wr));
3244
3245 off = wch->wc_tail;
3246 while (off != wch->wc_head) {
3247 struct wapbl_wc_null *wcn;
3248 #ifdef DEBUG
3249 off_t saveoff = off;
3250 #endif
3251 error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3252 if (error)
3253 goto out;
3254 wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3255 switch (wcn->wc_type) {
3256 case WAPBL_WC_BLOCKS:
3257 {
3258 struct wapbl_wc_blocklist *wc =
3259 (struct wapbl_wc_blocklist *)wr->wr_scratch;
3260 int i;
3261 for (i = 0; i < wc->wc_blkcount; i++) {
3262 int foundcnt = 0;
3263 int dirtycnt = 0;
3264 int j, n;
3265 /*
3266 * Check each physical block into the
3267 * hashtable independently
3268 */
3269 n = wc->wc_blocks[i].wc_dlen >>
3270 wch->wc_fs_dev_bshift;
3271 for (j = 0; j < n; j++) {
3272 struct wapbl_blk *wb =
3273 wapbl_blkhash_get(wr,
3274 wapbl_block_daddr(wc, i, j, fsblklen));
3275 if (wb && (wb->wb_off == off)) {
3276 foundcnt++;
3277 error =
3278 wapbl_circ_read(wr,
3279 scratch1, fsblklen,
3280 &off);
3281 if (error)
3282 goto out;
3283 error =
3284 wapbl_read(scratch2,
3285 fsblklen, fsdevvp,
3286 wb->wb_blk);
3287 if (error)
3288 goto out;
3289 if (memcmp(scratch1,
3290 scratch2,
3291 fsblklen)) {
3292 printf(
3293 "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
3294 wb->wb_blk, (intmax_t)off);
3295 dirtycnt++;
3296 mismatchcnt++;
3297 }
3298 } else {
3299 wapbl_circ_advance(wr,
3300 fsblklen, &off);
3301 }
3302 }
3303 #if 0
3304 /*
3305 * If all of the blocks in an entry
3306 * are clean, then remove all of its
3307 * blocks from the hashtable since they
3308 * never will need replay.
3309 */
3310 if ((foundcnt != 0) &&
3311 (dirtycnt == 0)) {
3312 off = saveoff;
3313 wapbl_circ_advance(wr,
3314 logblklen, &off);
3315 for (j = 0; j < n; j++) {
3316 struct wapbl_blk *wb =
3317 wapbl_blkhash_get(wr,
3318 wapbl_block_daddr(wc, i, j, fsblklen));
3319 if (wb &&
3320 (wb->wb_off == off)) {
3321 wapbl_blkhash_rem(wr, wb->wb_blk);
3322 }
3323 wapbl_circ_advance(wr,
3324 fsblklen, &off);
3325 }
3326 }
3327 #endif
3328 }
3329 }
3330 break;
3331 case WAPBL_WC_REVOCATIONS:
3332 case WAPBL_WC_INODES:
3333 break;
3334 default:
3335 KASSERT(0);
3336 }
3337 #ifdef DEBUG
3338 wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3339 KASSERT(off == saveoff);
3340 #endif
3341 }
3342 out:
3343 wapbl_free(scratch1, MAXBSIZE);
3344 wapbl_free(scratch2, MAXBSIZE);
3345 if (!error && mismatchcnt)
3346 error = EFTYPE;
3347 return error;
3348 }
3349 #endif
3350
3351 int
3352 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
3353 {
3354 struct wapbl_blk *wb;
3355 size_t i;
3356 off_t off;
3357 void *scratch;
3358 int error = 0;
3359 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3360
3361 KDASSERT(wapbl_replay_isopen(wr));
3362
3363 scratch = wapbl_alloc(MAXBSIZE);
3364
3365 for (i = 0; i <= wr->wr_blkhashmask; ++i) {
3366 LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
3367 off = wb->wb_off;
3368 error = wapbl_circ_read(wr, scratch, fsblklen, &off);
3369 if (error)
3370 break;
3371 error = wapbl_write(scratch, fsblklen, fsdevvp,
3372 wb->wb_blk);
3373 if (error)
3374 break;
3375 }
3376 }
3377
3378 wapbl_free(scratch, MAXBSIZE);
3379 return error;
3380 }
3381
3382 int
3383 wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
3384 {
3385 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3386
3387 KDASSERT(wapbl_replay_isopen(wr));
3388 KASSERT((len % fsblklen) == 0);
3389
3390 while (len != 0) {
3391 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3392 if (wb)
3393 return 1;
3394 len -= fsblklen;
blk++;
3395 }
3396 return 0;
3397 }
3398
3399 int
3400 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
3401 {
3402 int fsblklen = 1 << wr->wr_fs_dev_bshift;
3403
3404 KDASSERT(wapbl_replay_isopen(wr));
3405
3406 KASSERT((len % fsblklen) == 0);
3407
3408 while (len != 0) {
3409 struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3410 if (wb) {
3411 off_t off = wb->wb_off;
3412 int error;
3413 error = wapbl_circ_read(wr, data, fsblklen, &off);
3414 if (error)
3415 return error;
3416 }
3417 data = (uint8_t *)data + fsblklen;
3418 len -= fsblklen;
3419 blk++;
3420 }
3421 return 0;
3422 }
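
#if 0
	/*
	 * Editor's usage sketch (hypothetical, not from the source):
	 * after reading a metadata buffer from disk, a file system can
	 * overlay any newer copies still sitting in the journal.
	 */
	if (wapbl_replay_isopen(wr) &&
	    wapbl_replay_can_read(wr, blk, len)) {
		error = wapbl_replay_read(wr, data, blk, len);
		if (error)
			return error;
	}
#endif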
3423
3424 #ifdef _KERNEL
3425
3426 MODULE(MODULE_CLASS_VFS, wapbl, NULL);
3427
3428 static int
3429 wapbl_modcmd(modcmd_t cmd, void *arg)
3430 {
3431
3432 switch (cmd) {
3433 case MODULE_CMD_INIT:
3434 wapbl_init();
3435 return 0;
3436 case MODULE_CMD_FINI:
3437 return wapbl_fini();
3438 default:
3439 return ENOTTY;
3440 }
3441 }
3442 #endif /* _KERNEL */
3443