vfs_wapbl.c revision 1.80

1 1.80 jdolecek /* $NetBSD: vfs_wapbl.c,v 1.80 2016/09/22 16:22:29 jdolecek Exp $ */
2 1.2 simonb
3 1.2 simonb /*-
4 1.23 ad * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
5 1.2 simonb * All rights reserved.
6 1.2 simonb *
7 1.2 simonb * This code is derived from software contributed to The NetBSD Foundation
8 1.2 simonb * by Wasabi Systems, Inc.
9 1.2 simonb *
10 1.2 simonb * Redistribution and use in source and binary forms, with or without
11 1.2 simonb * modification, are permitted provided that the following conditions
12 1.2 simonb * are met:
13 1.2 simonb * 1. Redistributions of source code must retain the above copyright
14 1.2 simonb * notice, this list of conditions and the following disclaimer.
15 1.2 simonb * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 simonb * notice, this list of conditions and the following disclaimer in the
17 1.2 simonb * documentation and/or other materials provided with the distribution.
18 1.2 simonb *
19 1.2 simonb * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 simonb * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 simonb * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 simonb * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 simonb * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 simonb * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 simonb * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 simonb * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 simonb * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 simonb * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 simonb * POSSIBILITY OF SUCH DAMAGE.
30 1.2 simonb */
31 1.2 simonb
32 1.2 simonb /*
33 1.2 simonb * This implements file system independent write ahead logging (WAPBL).
34 1.2 simonb */
35 1.4 joerg
36 1.4 joerg #define WAPBL_INTERNAL
37 1.4 joerg
38 1.2 simonb #include <sys/cdefs.h>
39 1.80 jdolecek __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.80 2016/09/22 16:22:29 jdolecek Exp $");
40 1.2 simonb
41 1.2 simonb #include <sys/param.h>
42 1.31 mlelstv #include <sys/bitops.h>
43 1.68 riastrad #include <sys/time.h>
44 1.68 riastrad #include <sys/wapbl.h>
45 1.68 riastrad #include <sys/wapbl_replay.h>
46 1.2 simonb
47 1.2 simonb #ifdef _KERNEL
48 1.68 riastrad
49 1.68 riastrad #include <sys/atomic.h>
50 1.68 riastrad #include <sys/conf.h>
51 1.68 riastrad #include <sys/file.h>
52 1.68 riastrad #include <sys/kauth.h>
53 1.68 riastrad #include <sys/kernel.h>
54 1.68 riastrad #include <sys/module.h>
55 1.68 riastrad #include <sys/mount.h>
56 1.68 riastrad #include <sys/mutex.h>
57 1.2 simonb #include <sys/namei.h>
58 1.2 simonb #include <sys/proc.h>
59 1.68 riastrad #include <sys/resourcevar.h>
60 1.39 christos #include <sys/sysctl.h>
61 1.2 simonb #include <sys/uio.h>
62 1.2 simonb #include <sys/vnode.h>
63 1.2 simonb
64 1.2 simonb #include <miscfs/specfs/specdev.h>
65 1.2 simonb
66 1.51 para #define wapbl_alloc(s) kmem_alloc((s), KM_SLEEP)
67 1.51 para #define wapbl_free(a, s) kmem_free((a), (s))
68 1.51 para #define wapbl_calloc(n, s) kmem_zalloc((n)*(s), KM_SLEEP)
69 1.2 simonb
70 1.39 christos static struct sysctllog *wapbl_sysctl;
71 1.39 christos static int wapbl_flush_disk_cache = 1;
72 1.39 christos static int wapbl_verbose_commit = 0;
73 1.39 christos
74 1.57 joerg static inline size_t wapbl_space_free(size_t, off_t, off_t);
75 1.57 joerg
76 1.2 simonb #else /* !_KERNEL */
77 1.68 riastrad
78 1.2 simonb #include <assert.h>
79 1.2 simonb #include <errno.h>
80 1.68 riastrad #include <stdbool.h>
81 1.2 simonb #include <stdio.h>
82 1.2 simonb #include <stdlib.h>
83 1.2 simonb #include <string.h>
84 1.2 simonb
85 1.2 simonb #define KDASSERT(x) assert(x)
86 1.2 simonb #define KASSERT(x) assert(x)
87 1.51 para #define wapbl_alloc(s) malloc(s)
88 1.18 yamt #define wapbl_free(a, s) free(a)
89 1.2 simonb #define wapbl_calloc(n, s) calloc((n), (s))
90 1.2 simonb
91 1.2 simonb #endif /* !_KERNEL */
92 1.2 simonb
93 1.2 simonb /*
94 1.2 simonb * INTERNAL DATA STRUCTURES
95 1.2 simonb */
96 1.2 simonb
97 1.2 simonb /*
98 1.2 simonb * This structure holds per-mount log information.
99 1.2 simonb *
100 1.2 simonb * Legend: a = atomic access only
101 1.2 simonb * r = read-only after init
102 1.2 simonb * l = rwlock held
103 1.2 simonb * m = mutex held
104 1.38 hannken * lm = rwlock held writing or mutex held
105 1.2 simonb * u = unlocked access ok
106 1.2 simonb * b = bufcache_lock held
107 1.2 simonb */
108 1.60 matt LIST_HEAD(wapbl_ino_head, wapbl_ino);
109 1.2 simonb struct wapbl {
110 1.2 simonb struct vnode *wl_logvp; /* r: log here */
111 1.2 simonb struct vnode *wl_devvp; /* r: log on this device */
112 1.2 simonb struct mount *wl_mount; /* r: mountpoint wl is associated with */
113 1.2 simonb daddr_t wl_logpbn; /* r: Physical block number of start of log */
114 1.2 simonb int wl_log_dev_bshift; /* r: logarithm of device block size of log
115 1.2 simonb device */
116 1.2 simonb int wl_fs_dev_bshift; /* r: logarithm of device block size of
117 1.2 simonb filesystem device */
118 1.2 simonb
119 1.3 yamt unsigned wl_lock_count; /* m: Count of transactions in progress */
120 1.2 simonb
121 1.2 simonb size_t wl_circ_size; /* r: Number of bytes in buffer of log */
122 1.2 simonb size_t wl_circ_off; /* r: Number of bytes reserved at start */
123 1.2 simonb
124 1.2 simonb size_t wl_bufcount_max; /* r: Number of buffers reserved for log */
125 1.2 simonb size_t wl_bufbytes_max; /* r: Number of buf bytes reserved for log */
126 1.2 simonb
127 1.2 simonb off_t wl_head; /* l: Byte offset of log head */
128 1.2 simonb off_t wl_tail; /* l: Byte offset of log tail */
129 1.2 simonb /*
130 1.71 riastrad * WAPBL log layout, stored on wl_devvp at wl_logpbn:
131 1.71 riastrad *
132 1.71 riastrad * ___________________ wl_circ_size __________________
133 1.71 riastrad * / \
134 1.71 riastrad * +---------+---------+-------+--------------+--------+
135 1.71 riastrad * [ commit0 | commit1 | CCWCW | EEEEEEEEEEEE | CCCWCW ]
136 1.71 riastrad * +---------+---------+-------+--------------+--------+
137 1.71 riastrad * wl_circ_off --^ ^-- wl_head ^-- wl_tail
138 1.71 riastrad *
139 1.71 riastrad * commit0 and commit1 are commit headers. A commit header has
140 1.71 riastrad * a generation number, indicating which of the two headers is
141 1.71 riastrad * more recent, and an assignment of head and tail pointers.
142 1.71 riastrad * The rest is a circular queue of log records, starting at
143 1.71 riastrad * the byte offset wl_circ_off.
144 1.71 riastrad *
145 1.71 riastrad * E marks empty space for records.
146 1.71 riastrad * W marks records for block writes issued but waiting.
147 1.71 riastrad * C marks completed records.
148 1.71 riastrad *
149 1.71 riastrad * wapbl_flush writes new records to empty `E' spaces after
150 1.71 riastrad * wl_head from the current transaction in memory.
151 1.71 riastrad *
152 1.71 riastrad * wapbl_truncate advances wl_tail past any completed `C'
153 1.71 riastrad * records, freeing them up for use.
154 1.71 riastrad *
155 1.71 riastrad * head == tail == 0 means log is empty.
156 1.71 riastrad * head == tail != 0 means log is full.
157 1.71 riastrad *
158 1.71 riastrad * See assertions in wapbl_advance() for other boundary
159 1.71 riastrad * conditions.
160 1.71 riastrad *
161 1.71 riastrad * Only wapbl_flush moves the head, except when wapbl_truncate
162 1.71 riastrad * sets it to 0 to indicate that the log is empty.
163 1.71 riastrad *
164 1.71 riastrad * Only wapbl_truncate moves the tail, except when wapbl_flush
165 1.71 riastrad * sets it to wl_circ_off to indicate that the log is full.
166 1.2 simonb */
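/*
 * Illustrative example (values chosen only to make the diagram above
 * concrete): with wl_circ_off = 1024 and wl_circ_size = 64 KiB, having
 * head = 33792 and tail = 9216 means that bytes [9216, 33792) hold
 * completed or in-flight records (the C's and W's above), while the
 * remaining 40 KiB, i.e. [33792, 66560) plus [1024, 9216), is empty
 * space that wapbl_flush may fill.
 */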
167 1.2 simonb
168 1.2 simonb struct wapbl_wc_header *wl_wc_header; /* l */
169 1.2 simonb void *wl_wc_scratch; /* l: scratch space (XXX: why?!?) */
170 1.2 simonb
171 1.2 simonb kmutex_t wl_mtx; /* u: short-term lock */
172 1.2 simonb krwlock_t wl_rwlock; /* u: File system transaction lock */
173 1.2 simonb
174 1.2 simonb /*
175 1.2 simonb * Must be held while accessing
176 1.2 simonb * wl_count or wl_bufs or head or tail
177 1.2 simonb */
178 1.2 simonb
179 1.2 simonb /*
180 1.2 simonb * Callback called from within the flush routine to flush any extra
181 1.2 simonb * bits. Note that flush may be skipped without calling this if
182 1.2 simonb * there are no outstanding buffers in the transaction.
183 1.2 simonb */
184 1.5 joerg #if _KERNEL
185 1.2 simonb wapbl_flush_fn_t wl_flush; /* r */
186 1.2 simonb wapbl_flush_fn_t wl_flush_abort;/* r */
187 1.5 joerg #endif
188 1.2 simonb
189 1.2 simonb size_t wl_bufbytes; /* m: Byte count of pages in wl_bufs */
190 1.2 simonb size_t wl_bufcount; /* m: Count of buffers in wl_bufs */
191 1.2 simonb size_t wl_bcount; /* m: Total bcount of wl_bufs */
192 1.2 simonb
193 1.2 simonb LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */
194 1.2 simonb
195 1.2 simonb kcondvar_t wl_reclaimable_cv; /* m (obviously) */
196 1.2 simonb size_t wl_reclaimable_bytes; /* m: Amount of space available for
197 1.2 simonb reclamation by truncate */
198 1.2 simonb int wl_error_count; /* m: # of wl_entries with errors */
199 1.2 simonb size_t wl_reserved_bytes; /* never truncate log smaller than this */
200 1.2 simonb
201 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
202 1.2 simonb size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
203 1.2 simonb #endif
204 1.2 simonb
205 1.79 jdolecek #if _KERNEL
206 1.79 jdolecek int wl_brperjblock; /* r: Block records per journal block */
207 1.79 jdolecek #endif
208 1.79 jdolecek
209 1.38 hannken daddr_t *wl_deallocblks;/* lm: address of block */
210 1.38 hannken int *wl_dealloclens; /* lm: size of block */
211 1.38 hannken int wl_dealloccnt; /* lm: total count */
212 1.2 simonb int wl_dealloclim; /* l: max count */
213 1.2 simonb
214 1.2 simonb /* hashtable of inode numbers for allocated but unlinked inodes */
215 1.2 simonb /* synch ??? */
216 1.60 matt struct wapbl_ino_head *wl_inohash;
217 1.2 simonb u_long wl_inohashmask;
218 1.2 simonb int wl_inohashcnt;
219 1.2 simonb
220 1.2 simonb SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
221 1.2 simonb accounting */
222 1.54 hannken
223 1.54 hannken u_char *wl_buffer; /* l: buffer for wapbl_buffered_write() */
224 1.54 hannken daddr_t wl_buffer_dblk; /* l: buffer disk block address */
225 1.54 hannken size_t wl_buffer_used; /* l: buffer current use */
226 1.2 simonb };
227 1.2 simonb
228 1.2 simonb #ifdef WAPBL_DEBUG_PRINT
229 1.2 simonb int wapbl_debug_print = WAPBL_DEBUG_PRINT;
230 1.2 simonb #endif
231 1.2 simonb
232 1.2 simonb /****************************************************************/
233 1.2 simonb #ifdef _KERNEL
234 1.2 simonb
235 1.2 simonb #ifdef WAPBL_DEBUG
236 1.2 simonb struct wapbl *wapbl_debug_wl;
237 1.2 simonb #endif
238 1.2 simonb
239 1.2 simonb static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
240 1.2 simonb static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
241 1.2 simonb static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
242 1.2 simonb static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
243 1.2 simonb #endif /* _KERNEL */
244 1.2 simonb
245 1.14 joerg static int wapbl_replay_process(struct wapbl_replay *wr, off_t, off_t);
246 1.2 simonb
247 1.30 uebayasi static inline size_t wapbl_space_used(size_t avail, off_t head,
248 1.2 simonb off_t tail);
249 1.2 simonb
250 1.2 simonb #ifdef _KERNEL
251 1.2 simonb
252 1.51 para static struct pool wapbl_entry_pool;
253 1.51 para
254 1.2 simonb #define WAPBL_INODETRK_SIZE 83
255 1.2 simonb static int wapbl_ino_pool_refcount;
256 1.2 simonb static struct pool wapbl_ino_pool;
257 1.2 simonb struct wapbl_ino {
258 1.2 simonb LIST_ENTRY(wapbl_ino) wi_hash;
259 1.2 simonb ino_t wi_ino;
260 1.2 simonb mode_t wi_mode;
261 1.2 simonb };
262 1.2 simonb
263 1.2 simonb static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
264 1.2 simonb static void wapbl_inodetrk_free(struct wapbl *wl);
265 1.2 simonb static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);
266 1.2 simonb
267 1.2 simonb static size_t wapbl_transaction_len(struct wapbl *wl);
268 1.30 uebayasi static inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);
269 1.2 simonb
270 1.13 joerg #if 0
271 1.4 joerg int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
272 1.4 joerg #endif
273 1.4 joerg
274 1.4 joerg static int wapbl_replay_isopen1(struct wapbl_replay *);
275 1.4 joerg
276 1.2 simonb struct wapbl_ops wapbl_ops = {
277 1.2 simonb .wo_wapbl_discard = wapbl_discard,
278 1.2 simonb .wo_wapbl_replay_isopen = wapbl_replay_isopen1,
279 1.6 joerg .wo_wapbl_replay_can_read = wapbl_replay_can_read,
280 1.2 simonb .wo_wapbl_replay_read = wapbl_replay_read,
281 1.2 simonb .wo_wapbl_add_buf = wapbl_add_buf,
282 1.2 simonb .wo_wapbl_remove_buf = wapbl_remove_buf,
283 1.2 simonb .wo_wapbl_resize_buf = wapbl_resize_buf,
284 1.2 simonb .wo_wapbl_begin = wapbl_begin,
285 1.2 simonb .wo_wapbl_end = wapbl_end,
286 1.2 simonb .wo_wapbl_junlock_assert= wapbl_junlock_assert,
287 1.2 simonb
288 1.2 simonb /* XXX: the following is only used to say "this is a wapbl buf" */
289 1.2 simonb .wo_wapbl_biodone = wapbl_biodone,
290 1.2 simonb };
291 1.2 simonb
292 1.21 yamt static int
293 1.39 christos wapbl_sysctl_init(void)
294 1.39 christos {
295 1.39 christos int rv;
296 1.39 christos const struct sysctlnode *rnode, *cnode;
297 1.39 christos
298 1.39 christos wapbl_sysctl = NULL;
299 1.39 christos
300 1.39 christos rv = sysctl_createv(&wapbl_sysctl, 0, NULL, &rnode,
301 1.39 christos CTLFLAG_PERMANENT,
302 1.39 christos CTLTYPE_NODE, "wapbl",
303 1.39 christos SYSCTL_DESCR("WAPBL journaling options"),
304 1.39 christos NULL, 0, NULL, 0,
305 1.59 pooka CTL_VFS, CTL_CREATE, CTL_EOL);
306 1.39 christos if (rv)
307 1.39 christos return rv;
308 1.39 christos
309 1.39 christos rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
310 1.39 christos CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
311 1.39 christos CTLTYPE_INT, "flush_disk_cache",
312 1.39 christos SYSCTL_DESCR("flush disk cache"),
313 1.39 christos NULL, 0, &wapbl_flush_disk_cache, 0,
314 1.39 christos CTL_CREATE, CTL_EOL);
315 1.39 christos if (rv)
316 1.39 christos return rv;
317 1.39 christos
318 1.39 christos rv = sysctl_createv(&wapbl_sysctl, 0, &rnode, &cnode,
319 1.39 christos CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
320 1.39 christos CTLTYPE_INT, "verbose_commit",
321 1.39 christos SYSCTL_DESCR("show time and size of wapbl log commits"),
322 1.39 christos NULL, 0, &wapbl_verbose_commit, 0,
323 1.39 christos CTL_CREATE, CTL_EOL);
324 1.39 christos return rv;
325 1.39 christos }
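/*
 * The nodes created above show up as vfs.wapbl.flush_disk_cache and
 * vfs.wapbl.verbose_commit, so, for example, log commit timing can be
 * made visible from userland with:
 *
 *	sysctl -w vfs.wapbl.verbose_commit=1
 */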
326 1.39 christos
327 1.39 christos static void
328 1.39 christos wapbl_init(void)
329 1.39 christos {
330 1.51 para
331 1.51 para pool_init(&wapbl_entry_pool, sizeof(struct wapbl_entry), 0, 0, 0,
332 1.51 para "wapblentrypl", &pool_allocator_kmem, IPL_VM);
333 1.51 para
334 1.39 christos wapbl_sysctl_init();
335 1.39 christos }
336 1.39 christos
337 1.39 christos static int
338 1.74 riastrad wapbl_fini(void)
339 1.39 christos {
340 1.51 para
341 1.63 pgoyette if (wapbl_sysctl != NULL)
342 1.63 pgoyette sysctl_teardown(&wapbl_sysctl);
343 1.51 para
344 1.51 para pool_destroy(&wapbl_entry_pool);
345 1.51 para
346 1.39 christos return 0;
347 1.39 christos }
348 1.39 christos
349 1.39 christos static int
350 1.15 joerg wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
351 1.15 joerg {
352 1.15 joerg int error, i;
353 1.15 joerg
354 1.15 joerg WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
355 1.15 joerg ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));
356 1.15 joerg
357 1.15 joerg /*
358 1.15 joerg * It's only valid to reuse the replay log if it's
359 1.15 joerg * the same as the new log we just opened.
360 1.15 joerg */
361 1.15 joerg KDASSERT(!wapbl_replay_isopen(wr));
362 1.47 christos KASSERT(wl->wl_devvp->v_type == VBLK);
363 1.47 christos KASSERT(wr->wr_devvp->v_type == VBLK);
364 1.15 joerg KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
365 1.15 joerg KASSERT(wl->wl_logpbn == wr->wr_logpbn);
366 1.15 joerg KASSERT(wl->wl_circ_size == wr->wr_circ_size);
367 1.15 joerg KASSERT(wl->wl_circ_off == wr->wr_circ_off);
368 1.15 joerg KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
369 1.15 joerg KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);
370 1.15 joerg
371 1.15 joerg wl->wl_wc_header->wc_generation = wr->wr_generation + 1;
372 1.15 joerg
373 1.15 joerg for (i = 0; i < wr->wr_inodescnt; i++)
374 1.15 joerg wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
375 1.15 joerg wr->wr_inodes[i].wr_imode);
376 1.15 joerg
377 1.15 joerg /* Make sure new transaction won't overwrite old inodes list */
378 1.15 joerg KDASSERT(wapbl_transaction_len(wl) <=
379 1.15 joerg wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
380 1.15 joerg wr->wr_inodestail));
381 1.15 joerg
382 1.15 joerg wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
383 1.15 joerg wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
384 1.15 joerg wapbl_transaction_len(wl);
385 1.15 joerg
386 1.15 joerg error = wapbl_write_inodes(wl, &wl->wl_head);
387 1.15 joerg if (error)
388 1.15 joerg return error;
389 1.15 joerg
390 1.15 joerg KASSERT(wl->wl_head != wl->wl_tail);
391 1.15 joerg KASSERT(wl->wl_head != 0);
392 1.15 joerg
393 1.15 joerg return 0;
394 1.15 joerg }
395 1.15 joerg
396 1.2 simonb int
397 1.2 simonb wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
398 1.2 simonb daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
399 1.2 simonb wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
400 1.2 simonb {
401 1.2 simonb struct wapbl *wl;
402 1.2 simonb struct vnode *devvp;
403 1.2 simonb daddr_t logpbn;
404 1.2 simonb int error;
405 1.31 mlelstv int log_dev_bshift = ilog2(blksize);
406 1.32 mlelstv int fs_dev_bshift = log_dev_bshift;
407 1.2 simonb int run;
408 1.2 simonb
409 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
410 1.2 simonb " count=%zu blksize=%zu\n", vp, off, count, blksize));
411 1.2 simonb
412 1.2 simonb if (log_dev_bshift > fs_dev_bshift) {
413 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_OPEN,
414 1.2 simonb ("wapbl: log device's block size cannot be larger "
415 1.2 simonb "than filesystem's\n"));
416 1.2 simonb /*
417 1.2 simonb * Not currently implemented, although it could be if
418 1.2 simonb * needed someday.
419 1.2 simonb */
420 1.2 simonb return ENOSYS;
421 1.2 simonb }
422 1.2 simonb
423 1.2 simonb if (off < 0)
424 1.2 simonb return EINVAL;
425 1.2 simonb
426 1.2 simonb if (blksize < DEV_BSIZE)
427 1.2 simonb return EINVAL;
428 1.2 simonb if (blksize % DEV_BSIZE)
429 1.2 simonb return EINVAL;
430 1.2 simonb
431 1.2 simonb /* XXXTODO: verify that the full load is writable */
432 1.2 simonb
433 1.2 simonb /*
434 1.2 simonb * XXX check for minimum log size
435 1.2 simonb * minimum is governed by minimum amount of space
436 1.2 simonb * to complete a transaction. (probably truncate)
437 1.2 simonb */
438 1.2 simonb /* XXX for now pick something minimal */
439 1.2 simonb if ((count * blksize) < MAXPHYS) {
440 1.2 simonb return ENOSPC;
441 1.2 simonb }
442 1.2 simonb
443 1.2 simonb if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
444 1.2 simonb return error;
445 1.2 simonb }
446 1.2 simonb
447 1.2 simonb wl = wapbl_calloc(1, sizeof(*wl));
448 1.2 simonb rw_init(&wl->wl_rwlock);
449 1.2 simonb mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
450 1.2 simonb cv_init(&wl->wl_reclaimable_cv, "wapblrec");
451 1.2 simonb LIST_INIT(&wl->wl_bufs);
452 1.2 simonb SIMPLEQ_INIT(&wl->wl_entries);
453 1.2 simonb
454 1.2 simonb wl->wl_logvp = vp;
455 1.2 simonb wl->wl_devvp = devvp;
456 1.2 simonb wl->wl_mount = mp;
457 1.2 simonb wl->wl_logpbn = logpbn;
458 1.2 simonb wl->wl_log_dev_bshift = log_dev_bshift;
459 1.2 simonb wl->wl_fs_dev_bshift = fs_dev_bshift;
460 1.2 simonb
461 1.2 simonb wl->wl_flush = flushfn;
462 1.2 simonb wl->wl_flush_abort = flushabortfn;
463 1.2 simonb
464 1.2 simonb /* Reserve two log device blocks for the commit headers */
465 1.2 simonb wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
466 1.34 mlelstv wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
467 1.2 simonb /* truncate the log usage to a multiple of log_dev_bshift */
468 1.2 simonb wl->wl_circ_size >>= wl->wl_log_dev_bshift;
469 1.2 simonb wl->wl_circ_size <<= wl->wl_log_dev_bshift;
470 1.2 simonb
471 1.2 simonb /*
472 1.2 simonb * wl_bufbytes_max limits the size of the in memory transaction space.
473 1.2 simonb * - Since buffers are allocated and accounted for in units of
474 1.2 simonb * PAGE_SIZE it is required to be a multiple of PAGE_SIZE
475 1.2 simonb * (i.e. 1<<PAGE_SHIFT)
476 1.2 simonb * - Since the log device has to be written in units of
477 1.2 simonb * 1<<wl_log_dev_bshift it is required to be a multiple of
478 1.2 simonb * 1<<wl_log_dev_bshift.
479 1.2 simonb * - Since the filesystem will provide data in units of 1<<wl_fs_dev_bshift,
480 1.2 simonb * it is convenient for it to be a multiple of 1<<wl_fs_dev_bshift.
481 1.2 simonb * Therefore it must be multiple of the least common multiple of those
482 1.2 simonb * three quantities. Fortunately, all of those quantities are
483 1.2 simonb * guaranteed to be a power of two, and the least common multiple of
484 1.2 simonb * a set of numbers which are all powers of two is simply the maximum
485 1.2 simonb * of those numbers. Finally, the maximum logarithm of a power of two
486 1.2 simonb * is the same as the log of the maximum power of two. So we can do
487 1.2 simonb * the following operations to size wl_bufbytes_max:
488 1.2 simonb */
489 1.2 simonb
490 1.2 simonb /* XXX fix actual number of pages reserved per filesystem. */
491 1.2 simonb wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);
492 1.2 simonb
493 1.2 simonb /* Round wl_bufbytes_max to the largest power of two constraint */
494 1.2 simonb wl->wl_bufbytes_max >>= PAGE_SHIFT;
495 1.2 simonb wl->wl_bufbytes_max <<= PAGE_SHIFT;
496 1.2 simonb wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
497 1.2 simonb wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
498 1.2 simonb wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
499 1.2 simonb wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
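	/*
	 * For instance (assuming a common configuration, not a
	 * requirement): with PAGE_SHIFT = 12 and both bshifts <= 12,
	 * the three shift pairs above simply round wl_bufbytes_max
	 * down to a multiple of PAGE_SIZE, which is then automatically
	 * a multiple of both device block sizes as well.
	 */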
500 1.2 simonb
501 1.2 simonb /* XXX maybe use filesystem fragment size instead of 1024 */
502 1.2 simonb /* XXX fix actual number of buffers reserved per filesystem. */
503 1.2 simonb wl->wl_bufcount_max = (nbuf / 2) * 1024;
504 1.2 simonb
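	/*
	 * Number of block records that fit in one journal block:
	 * the journal block size minus the header portion of
	 * struct wapbl_wc_blocklist (everything before wc_blocks[]),
	 * divided by the size of a single wc_blocks[] entry, i.e.
	 * how many block records one journal block can describe.
	 */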
505 1.79 jdolecek wl->wl_brperjblock = ((1<<wl->wl_log_dev_bshift)
506 1.79 jdolecek - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
507 1.79 jdolecek sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
508 1.79 jdolecek KASSERT(wl->wl_brperjblock > 0);
509 1.79 jdolecek
510 1.2 simonb /* XXX tie this into resource estimation */
511 1.41 hannken wl->wl_dealloclim = wl->wl_bufbytes_max / mp->mnt_stat.f_bsize / 2;
512 1.2 simonb
513 1.51 para wl->wl_deallocblks = wapbl_alloc(sizeof(*wl->wl_deallocblks) *
514 1.2 simonb wl->wl_dealloclim);
515 1.51 para wl->wl_dealloclens = wapbl_alloc(sizeof(*wl->wl_dealloclens) *
516 1.2 simonb wl->wl_dealloclim);
517 1.2 simonb
518 1.54 hannken wl->wl_buffer = wapbl_alloc(MAXPHYS);
519 1.54 hannken wl->wl_buffer_used = 0;
520 1.54 hannken
521 1.2 simonb wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);
522 1.2 simonb
523 1.2 simonb /* Initialize the commit header */
524 1.2 simonb {
525 1.2 simonb struct wapbl_wc_header *wc;
526 1.14 joerg size_t len = 1 << wl->wl_log_dev_bshift;
527 1.2 simonb wc = wapbl_calloc(1, len);
528 1.2 simonb wc->wc_type = WAPBL_WC_HEADER;
529 1.2 simonb wc->wc_len = len;
530 1.2 simonb wc->wc_circ_off = wl->wl_circ_off;
531 1.2 simonb wc->wc_circ_size = wl->wl_circ_size;
532 1.2 simonb /* XXX wc->wc_fsid */
533 1.2 simonb wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
534 1.2 simonb wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
535 1.2 simonb wl->wl_wc_header = wc;
536 1.51 para wl->wl_wc_scratch = wapbl_alloc(len);
537 1.2 simonb }
538 1.2 simonb
539 1.2 simonb /*
540 1.2 simonb * if there was an existing set of unlinked but
541 1.2 simonb * allocated inodes, preserve it in the new
542 1.2 simonb * log.
543 1.2 simonb */
544 1.2 simonb if (wr && wr->wr_inodescnt) {
545 1.15 joerg error = wapbl_start_flush_inodes(wl, wr);
546 1.2 simonb if (error)
547 1.2 simonb goto errout;
548 1.2 simonb }
549 1.2 simonb
550 1.2 simonb error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
551 1.2 simonb if (error) {
552 1.2 simonb goto errout;
553 1.2 simonb }
554 1.2 simonb
555 1.2 simonb *wlp = wl;
556 1.2 simonb #if defined(WAPBL_DEBUG)
557 1.2 simonb wapbl_debug_wl = wl;
558 1.2 simonb #endif
559 1.2 simonb
560 1.2 simonb return 0;
561 1.2 simonb errout:
562 1.2 simonb wapbl_discard(wl);
563 1.18 yamt wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
564 1.18 yamt wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
565 1.18 yamt wapbl_free(wl->wl_deallocblks,
566 1.18 yamt sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim);
567 1.18 yamt wapbl_free(wl->wl_dealloclens,
568 1.18 yamt sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim);
569 1.54 hannken wapbl_free(wl->wl_buffer, MAXPHYS);
570 1.2 simonb wapbl_inodetrk_free(wl);
571 1.18 yamt wapbl_free(wl, sizeof(*wl));
572 1.2 simonb
573 1.2 simonb return error;
574 1.2 simonb }
575 1.2 simonb
576 1.2 simonb /*
577 1.2 simonb * Like wapbl_flush, only discards the transaction
578 1.2 simonb * completely
579 1.2 simonb */
580 1.2 simonb
581 1.2 simonb void
582 1.2 simonb wapbl_discard(struct wapbl *wl)
583 1.2 simonb {
584 1.2 simonb struct wapbl_entry *we;
585 1.2 simonb struct buf *bp;
586 1.2 simonb int i;
587 1.2 simonb
588 1.2 simonb /*
589 1.2 simonb * XXX we may consider using upgrade here
590 1.2 simonb * if we want to call flush from inside a transaction
591 1.2 simonb */
592 1.2 simonb rw_enter(&wl->wl_rwlock, RW_WRITER);
593 1.2 simonb wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
594 1.2 simonb wl->wl_dealloccnt);
595 1.2 simonb
596 1.2 simonb #ifdef WAPBL_DEBUG_PRINT
597 1.2 simonb {
598 1.2 simonb pid_t pid = -1;
599 1.2 simonb lwpid_t lid = -1;
600 1.2 simonb if (curproc)
601 1.2 simonb pid = curproc->p_pid;
602 1.2 simonb if (curlwp)
603 1.2 simonb lid = curlwp->l_lid;
604 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
605 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
606 1.2 simonb ("wapbl_discard: thread %d.%d discarding "
607 1.2 simonb "transaction\n"
608 1.2 simonb "\tbufcount=%zu bufbytes=%zu bcount=%zu "
609 1.2 simonb "deallocs=%d inodes=%d\n"
610 1.2 simonb "\terrcnt = %u, reclaimable=%zu reserved=%zu "
611 1.2 simonb "unsynced=%zu\n",
612 1.2 simonb pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
613 1.2 simonb wl->wl_bcount, wl->wl_dealloccnt,
614 1.2 simonb wl->wl_inohashcnt, wl->wl_error_count,
615 1.2 simonb wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
616 1.2 simonb wl->wl_unsynced_bufbytes));
617 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
618 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
619 1.2 simonb ("\tentry: bufcount = %zu, reclaimable = %zu, "
620 1.2 simonb "error = %d, unsynced = %zu\n",
621 1.2 simonb we->we_bufcount, we->we_reclaimable_bytes,
622 1.2 simonb we->we_error, we->we_unsynced_bufbytes));
623 1.2 simonb }
624 1.2 simonb #else /* !WAPBL_DEBUG_BUFBYTES */
625 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
626 1.2 simonb ("wapbl_discard: thread %d.%d discarding transaction\n"
627 1.2 simonb "\tbufcount=%zu bufbytes=%zu bcount=%zu "
628 1.2 simonb "deallocs=%d inodes=%d\n"
629 1.2 simonb "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
630 1.2 simonb pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
631 1.2 simonb wl->wl_bcount, wl->wl_dealloccnt,
632 1.2 simonb wl->wl_inohashcnt, wl->wl_error_count,
633 1.2 simonb wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
634 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
635 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
636 1.2 simonb ("\tentry: bufcount = %zu, reclaimable = %zu, "
637 1.2 simonb "error = %d\n",
638 1.2 simonb we->we_bufcount, we->we_reclaimable_bytes,
639 1.2 simonb we->we_error));
640 1.2 simonb }
641 1.2 simonb #endif /* !WAPBL_DEBUG_BUFBYTES */
642 1.2 simonb }
643 1.2 simonb #endif /* WAPBL_DEBUG_PRINT */
644 1.2 simonb
645 1.2 simonb for (i = 0; i <= wl->wl_inohashmask; i++) {
646 1.2 simonb struct wapbl_ino_head *wih;
647 1.2 simonb struct wapbl_ino *wi;
648 1.2 simonb
649 1.2 simonb wih = &wl->wl_inohash[i];
650 1.2 simonb while ((wi = LIST_FIRST(wih)) != NULL) {
651 1.2 simonb LIST_REMOVE(wi, wi_hash);
652 1.2 simonb pool_put(&wapbl_ino_pool, wi);
653 1.2 simonb KASSERT(wl->wl_inohashcnt > 0);
654 1.2 simonb wl->wl_inohashcnt--;
655 1.2 simonb }
656 1.2 simonb }
657 1.2 simonb
658 1.2 simonb /*
659 1.2 simonb * clean buffer list
660 1.2 simonb */
661 1.2 simonb mutex_enter(&bufcache_lock);
662 1.2 simonb mutex_enter(&wl->wl_mtx);
663 1.2 simonb while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
664 1.2 simonb if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
665 1.2 simonb /*
666 1.2 simonb * The buffer will be unlocked and
667 1.2 simonb * removed from the transaction in brelse
668 1.2 simonb */
669 1.2 simonb mutex_exit(&wl->wl_mtx);
670 1.2 simonb brelsel(bp, 0);
671 1.2 simonb mutex_enter(&wl->wl_mtx);
672 1.2 simonb }
673 1.2 simonb }
674 1.2 simonb mutex_exit(&wl->wl_mtx);
675 1.2 simonb mutex_exit(&bufcache_lock);
676 1.2 simonb
677 1.2 simonb /*
678 1.2 simonb * Remove references to this wl from wl_entries, free any which
679 1.2 simonb * no longer have buffers, others will be freed in wapbl_biodone
680 1.2 simonb * when they no longer have any buffers.
681 1.2 simonb */
682 1.2 simonb while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
683 1.2 simonb SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
684 1.2 simonb /* XXX should we be accumulating wl_error_count
685 1.2 simonb * and increasing reclaimable bytes ? */
686 1.2 simonb we->we_wapbl = NULL;
687 1.2 simonb if (we->we_bufcount == 0) {
688 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
689 1.2 simonb KASSERT(we->we_unsynced_bufbytes == 0);
690 1.2 simonb #endif
691 1.51 para pool_put(&wapbl_entry_pool, we);
692 1.2 simonb }
693 1.2 simonb }
694 1.2 simonb
695 1.2 simonb /* Discard list of deallocs */
696 1.2 simonb wl->wl_dealloccnt = 0;
697 1.2 simonb /* XXX should we clear wl_reserved_bytes? */
698 1.2 simonb
699 1.2 simonb KASSERT(wl->wl_bufbytes == 0);
700 1.2 simonb KASSERT(wl->wl_bcount == 0);
701 1.2 simonb KASSERT(wl->wl_bufcount == 0);
702 1.2 simonb KASSERT(LIST_EMPTY(&wl->wl_bufs));
703 1.2 simonb KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
704 1.2 simonb KASSERT(wl->wl_inohashcnt == 0);
705 1.2 simonb
706 1.2 simonb rw_exit(&wl->wl_rwlock);
707 1.2 simonb }
708 1.2 simonb
709 1.2 simonb int
710 1.2 simonb wapbl_stop(struct wapbl *wl, int force)
711 1.2 simonb {
712 1.2 simonb int error;
713 1.2 simonb
714 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
715 1.2 simonb error = wapbl_flush(wl, 1);
716 1.2 simonb if (error) {
717 1.2 simonb if (force)
718 1.2 simonb wapbl_discard(wl);
719 1.2 simonb else
720 1.2 simonb return error;
721 1.2 simonb }
722 1.2 simonb
723 1.2 simonb /* Unlinked inodes persist after a flush */
724 1.2 simonb if (wl->wl_inohashcnt) {
725 1.2 simonb if (force) {
726 1.2 simonb wapbl_discard(wl);
727 1.2 simonb } else {
728 1.2 simonb return EBUSY;
729 1.2 simonb }
730 1.2 simonb }
731 1.2 simonb
732 1.2 simonb KASSERT(wl->wl_bufbytes == 0);
733 1.2 simonb KASSERT(wl->wl_bcount == 0);
734 1.2 simonb KASSERT(wl->wl_bufcount == 0);
735 1.2 simonb KASSERT(LIST_EMPTY(&wl->wl_bufs));
736 1.2 simonb KASSERT(wl->wl_dealloccnt == 0);
737 1.2 simonb KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
738 1.2 simonb KASSERT(wl->wl_inohashcnt == 0);
739 1.2 simonb
740 1.18 yamt wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
741 1.18 yamt wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
742 1.18 yamt wapbl_free(wl->wl_deallocblks,
743 1.18 yamt sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim);
744 1.18 yamt wapbl_free(wl->wl_dealloclens,
745 1.18 yamt sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim);
746 1.54 hannken wapbl_free(wl->wl_buffer, MAXPHYS);
747 1.2 simonb wapbl_inodetrk_free(wl);
748 1.2 simonb
749 1.2 simonb cv_destroy(&wl->wl_reclaimable_cv);
750 1.2 simonb mutex_destroy(&wl->wl_mtx);
751 1.2 simonb rw_destroy(&wl->wl_rwlock);
752 1.18 yamt wapbl_free(wl, sizeof(*wl));
753 1.2 simonb
754 1.2 simonb return 0;
755 1.2 simonb }
756 1.2 simonb
757 1.71 riastrad /****************************************************************/
758 1.71 riastrad /*
759 1.71 riastrad * Unbuffered disk I/O
760 1.71 riastrad */
761 1.71 riastrad
762 1.2 simonb static int
763 1.2 simonb wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
764 1.2 simonb {
765 1.2 simonb struct pstats *pstats = curlwp->l_proc->p_stats;
766 1.2 simonb struct buf *bp;
767 1.2 simonb int error;
768 1.2 simonb
769 1.2 simonb KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
770 1.2 simonb KASSERT(devvp->v_type == VBLK);
771 1.2 simonb
772 1.2 simonb if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
773 1.45 rmind mutex_enter(devvp->v_interlock);
774 1.2 simonb devvp->v_numoutput++;
775 1.45 rmind mutex_exit(devvp->v_interlock);
776 1.2 simonb pstats->p_ru.ru_oublock++;
777 1.2 simonb } else {
778 1.2 simonb pstats->p_ru.ru_inblock++;
779 1.2 simonb }
780 1.2 simonb
781 1.2 simonb bp = getiobuf(devvp, true);
782 1.2 simonb bp->b_flags = flags;
783 1.2 simonb bp->b_cflags = BC_BUSY; /* silly & dubious */
784 1.2 simonb bp->b_dev = devvp->v_rdev;
785 1.2 simonb bp->b_data = data;
786 1.2 simonb bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
787 1.2 simonb bp->b_blkno = pbn;
788 1.52 chs BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
789 1.2 simonb
790 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_IO,
791 1.29 pooka ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%"PRIx64"\n",
792 1.2 simonb BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
793 1.2 simonb bp->b_blkno, bp->b_dev));
794 1.2 simonb
795 1.2 simonb VOP_STRATEGY(devvp, bp);
796 1.2 simonb
797 1.2 simonb error = biowait(bp);
798 1.2 simonb putiobuf(bp);
799 1.2 simonb
800 1.2 simonb if (error) {
801 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
802 1.2 simonb ("wapbl_doio: %s %zu bytes at block %" PRId64
803 1.29 pooka " on dev 0x%"PRIx64" failed with error %d\n",
804 1.2 simonb (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
805 1.2 simonb "write" : "read"),
806 1.2 simonb len, pbn, devvp->v_rdev, error));
807 1.2 simonb }
808 1.2 simonb
809 1.2 simonb return error;
810 1.2 simonb }
811 1.2 simonb
812 1.71 riastrad /*
813 1.71 riastrad * wapbl_write(data, len, devvp, pbn)
814 1.71 riastrad *
815 1.71 riastrad * Synchronously write len bytes from data to physical block pbn
816 1.71 riastrad * on devvp.
817 1.71 riastrad */
818 1.2 simonb int
819 1.2 simonb wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
820 1.2 simonb {
821 1.2 simonb
822 1.2 simonb return wapbl_doio(data, len, devvp, pbn, B_WRITE);
823 1.2 simonb }
824 1.2 simonb
825 1.71 riastrad /*
826 1.71 riastrad * wapbl_read(data, len, devvp, pbn)
827 1.71 riastrad *
828 1.71 riastrad * Synchronously read len bytes into data from physical block pbn
829 1.71 riastrad * on devvp.
830 1.71 riastrad */
831 1.2 simonb int
832 1.2 simonb wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
833 1.2 simonb {
834 1.2 simonb
835 1.2 simonb return wapbl_doio(data, len, devvp, pbn, B_READ);
836 1.2 simonb }
837 1.2 simonb
838 1.71 riastrad /****************************************************************/
839 1.71 riastrad /*
840 1.71 riastrad * Buffered disk writes -- try to coalesce writes and emit
841 1.71 riastrad * MAXPHYS-aligned blocks.
842 1.71 riastrad */
843 1.71 riastrad
844 1.2 simonb /*
845 1.71 riastrad * wapbl_buffered_flush(wl)
846 1.71 riastrad *
847 1.71 riastrad * Flush any buffered writes from wapbl_buffered_write.
848 1.54 hannken */
849 1.54 hannken static int
850 1.54 hannken wapbl_buffered_flush(struct wapbl *wl)
851 1.54 hannken {
852 1.54 hannken int error;
853 1.54 hannken
854 1.54 hannken if (wl->wl_buffer_used == 0)
855 1.54 hannken return 0;
856 1.54 hannken
857 1.54 hannken error = wapbl_doio(wl->wl_buffer, wl->wl_buffer_used,
858 1.54 hannken wl->wl_devvp, wl->wl_buffer_dblk, B_WRITE);
859 1.54 hannken wl->wl_buffer_used = 0;
860 1.54 hannken
861 1.54 hannken return error;
862 1.54 hannken }
863 1.54 hannken
864 1.54 hannken /*
865 1.71 riastrad * wapbl_buffered_write(data, len, wl, pbn)
866 1.71 riastrad *
867 1.71 riastrad * Write len bytes from data to physical block pbn on
868 1.71 riastrad * wl->wl_devvp. The write may not complete until
869 1.71 riastrad * wapbl_buffered_flush.
870 1.54 hannken */
871 1.54 hannken static int
872 1.54 hannken wapbl_buffered_write(void *data, size_t len, struct wapbl *wl, daddr_t pbn)
873 1.54 hannken {
874 1.54 hannken int error;
875 1.54 hannken size_t resid;
876 1.54 hannken
877 1.54 hannken /*
878 1.54 hannken * If not adjacent to buffered data, flush first. Disk block
879 1.54 hannken * address is always valid for non-empty buffer.
880 1.54 hannken */
881 1.54 hannken if (wl->wl_buffer_used > 0 &&
882 1.54 hannken pbn != wl->wl_buffer_dblk + btodb(wl->wl_buffer_used)) {
883 1.54 hannken error = wapbl_buffered_flush(wl);
884 1.54 hannken if (error)
885 1.54 hannken return error;
886 1.54 hannken }
887 1.54 hannken /*
888 1.54 hannken * If this write goes to an empty buffer, we have to
889 1.54 hannken * save the disk block address first.
890 1.54 hannken */
891 1.54 hannken if (wl->wl_buffer_used == 0)
892 1.54 hannken wl->wl_buffer_dblk = pbn;
893 1.54 hannken /*
894 1.54 hannken * Remaining space so this buffer ends on a MAXPHYS boundary.
895 1.54 hannken *
896 1.54 hannken * This cannot be less than or equal to zero, because the buffer
897 1.54 hannken * would have been flushed by the previous call in that case.
898 1.54 hannken */
899 1.54 hannken resid = MAXPHYS - dbtob(wl->wl_buffer_dblk % btodb(MAXPHYS)) -
900 1.54 hannken wl->wl_buffer_used;
901 1.54 hannken KASSERT(resid > 0);
902 1.54 hannken KASSERT(dbtob(btodb(resid)) == resid);
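	/*
	 * Worked example (assuming MAXPHYS = 64 KiB and DEV_BSIZE = 512,
	 * common values but not required): with wl_buffer_dblk = 24 and
	 * wl_buffer_used = 4096, the buffer covers byte offsets
	 * [12288, 16384) of its 64 KiB window, so resid = 65536 - 12288
	 * - 4096 = 49152; a write of resid bytes or more is cut off at
	 * that point and flushed immediately, ending exactly on a
	 * MAXPHYS boundary.
	 */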
903 1.54 hannken if (len >= resid) {
904 1.54 hannken memcpy(wl->wl_buffer + wl->wl_buffer_used, data, resid);
905 1.54 hannken wl->wl_buffer_used += resid;
906 1.54 hannken error = wapbl_doio(wl->wl_buffer, wl->wl_buffer_used,
907 1.54 hannken wl->wl_devvp, wl->wl_buffer_dblk, B_WRITE);
908 1.54 hannken data = (uint8_t *)data + resid;
909 1.54 hannken len -= resid;
910 1.54 hannken wl->wl_buffer_dblk = pbn + btodb(resid);
911 1.54 hannken wl->wl_buffer_used = 0;
912 1.54 hannken if (error)
913 1.54 hannken return error;
914 1.54 hannken }
915 1.54 hannken KASSERT(len < MAXPHYS);
916 1.54 hannken if (len > 0) {
917 1.54 hannken memcpy(wl->wl_buffer + wl->wl_buffer_used, data, len);
918 1.54 hannken wl->wl_buffer_used += len;
919 1.54 hannken }
920 1.54 hannken
921 1.54 hannken return 0;
922 1.54 hannken }
923 1.54 hannken
924 1.54 hannken /*
925 1.71 riastrad * wapbl_circ_write(wl, data, len, offp)
926 1.71 riastrad *
927 1.71 riastrad * Write len bytes from data to the circular queue of wl, starting
928 1.71 riastrad * at linear byte offset *offp, and returning the new linear byte
929 1.71 riastrad * offset in *offp.
930 1.71 riastrad *
931 1.71 riastrad * If the starting linear byte offset precedes wl->wl_circ_off,
932 1.71 riastrad * the write instead begins at wl->wl_circ_off. XXX WTF? This
933 1.71 riastrad * should be a KASSERT, not a conditional.
934 1.71 riastrad *
935 1.71 riastrad * The write is buffered in wl and must be flushed with
936 1.71 riastrad * wapbl_buffered_flush before it will be submitted to the disk.
937 1.2 simonb */
938 1.2 simonb static int
939 1.2 simonb wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
940 1.2 simonb {
941 1.2 simonb size_t slen;
942 1.2 simonb off_t off = *offp;
943 1.2 simonb int error;
944 1.34 mlelstv daddr_t pbn;
945 1.2 simonb
946 1.2 simonb KDASSERT(((len >> wl->wl_log_dev_bshift) <<
947 1.2 simonb wl->wl_log_dev_bshift) == len);
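	/*
	 * Example of the wrap-around handled below (illustrative
	 * offsets only): with wl_circ_off = 1024 and wl_circ_size =
	 * 64 KiB the valid region is [1024, 66560); writing 8192 bytes
	 * starting at off = 63488 is split into 3072 bytes at
	 * [63488, 66560) followed by 5120 bytes at [1024, 6144),
	 * leaving *offp = 6144.
	 */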
948 1.2 simonb
949 1.2 simonb if (off < wl->wl_circ_off)
950 1.2 simonb off = wl->wl_circ_off;
951 1.2 simonb slen = wl->wl_circ_off + wl->wl_circ_size - off;
952 1.2 simonb if (slen < len) {
953 1.34 mlelstv pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
954 1.34 mlelstv #ifdef _KERNEL
955 1.34 mlelstv pbn = btodb(pbn << wl->wl_log_dev_bshift);
956 1.34 mlelstv #endif
957 1.54 hannken error = wapbl_buffered_write(data, slen, wl, pbn);
958 1.2 simonb if (error)
959 1.2 simonb return error;
960 1.2 simonb data = (uint8_t *)data + slen;
961 1.2 simonb len -= slen;
962 1.2 simonb off = wl->wl_circ_off;
963 1.2 simonb }
964 1.34 mlelstv pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
965 1.34 mlelstv #ifdef _KERNEL
966 1.34 mlelstv pbn = btodb(pbn << wl->wl_log_dev_bshift);
967 1.34 mlelstv #endif
968 1.54 hannken error = wapbl_buffered_write(data, len, wl, pbn);
969 1.2 simonb if (error)
970 1.2 simonb return error;
971 1.2 simonb off += len;
972 1.2 simonb if (off >= wl->wl_circ_off + wl->wl_circ_size)
973 1.2 simonb off = wl->wl_circ_off;
974 1.2 simonb *offp = off;
975 1.2 simonb return 0;
976 1.2 simonb }
977 1.2 simonb
978 1.2 simonb /****************************************************************/
979 1.71 riastrad /*
980 1.71 riastrad * WAPBL transactions: entering, adding/removing bufs, and exiting
981 1.71 riastrad */
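/*
 * Typical transaction bracketing by a client file system looks roughly
 * like the following sketch (error handling trimmed; the real callers
 * live in the individual file systems):
 *
 *	error = wapbl_begin(wl, __FILE__, __LINE__);
 *	if (error)
 *		return error;
 *	... modify metadata; each dirtied metadata buffer is attached
 *	    to the transaction via wapbl_add_buf() ...
 *	wapbl_end(wl);
 */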
982 1.2 simonb
983 1.2 simonb int
984 1.2 simonb wapbl_begin(struct wapbl *wl, const char *file, int line)
985 1.2 simonb {
986 1.2 simonb int doflush;
987 1.2 simonb unsigned lockcount;
988 1.2 simonb
989 1.2 simonb KDASSERT(wl);
990 1.2 simonb
991 1.2 simonb /*
992 1.2 simonb * XXX this needs to be made much more sophisticated.
993 1.2 simonb * perhaps each wapbl_begin could reserve a specified
994 1.2 simonb * number of buffers and bytes.
995 1.2 simonb */
996 1.2 simonb mutex_enter(&wl->wl_mtx);
997 1.2 simonb lockcount = wl->wl_lock_count;
998 1.2 simonb doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
999 1.2 simonb wl->wl_bufbytes_max / 2) ||
1000 1.2 simonb ((wl->wl_bufcount + (lockcount * 10)) >
1001 1.2 simonb wl->wl_bufcount_max / 2) ||
1002 1.28 pooka (wapbl_transaction_len(wl) > wl->wl_circ_size / 2) ||
1003 1.42 hannken (wl->wl_dealloccnt >= (wl->wl_dealloclim / 2));
1004 1.2 simonb mutex_exit(&wl->wl_mtx);
1005 1.2 simonb
1006 1.2 simonb if (doflush) {
1007 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1008 1.2 simonb ("force flush lockcnt=%d bufbytes=%zu "
1009 1.28 pooka "(max=%zu) bufcount=%zu (max=%zu) "
1010 1.28 pooka "dealloccnt %d (lim=%d)\n",
1011 1.2 simonb lockcount, wl->wl_bufbytes,
1012 1.2 simonb wl->wl_bufbytes_max, wl->wl_bufcount,
1013 1.28 pooka wl->wl_bufcount_max,
1014 1.28 pooka wl->wl_dealloccnt, wl->wl_dealloclim));
1015 1.2 simonb }
1016 1.2 simonb
1017 1.2 simonb if (doflush) {
1018 1.2 simonb int error = wapbl_flush(wl, 0);
1019 1.2 simonb if (error)
1020 1.2 simonb return error;
1021 1.2 simonb }
1022 1.2 simonb
1023 1.23 ad rw_enter(&wl->wl_rwlock, RW_READER);
1024 1.2 simonb mutex_enter(&wl->wl_mtx);
1025 1.2 simonb wl->wl_lock_count++;
1026 1.2 simonb mutex_exit(&wl->wl_mtx);
1027 1.2 simonb
1028 1.23 ad #if defined(WAPBL_DEBUG_PRINT)
1029 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
1030 1.2 simonb ("wapbl_begin thread %d.%d with bufcount=%zu "
1031 1.2 simonb "bufbytes=%zu bcount=%zu at %s:%d\n",
1032 1.2 simonb curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1033 1.2 simonb wl->wl_bufbytes, wl->wl_bcount, file, line));
1034 1.2 simonb #endif
1035 1.2 simonb
1036 1.2 simonb return 0;
1037 1.2 simonb }
1038 1.2 simonb
1039 1.2 simonb void
1040 1.2 simonb wapbl_end(struct wapbl *wl)
1041 1.2 simonb {
1042 1.2 simonb
1043 1.23 ad #if defined(WAPBL_DEBUG_PRINT)
1044 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
1045 1.2 simonb ("wapbl_end thread %d.%d with bufcount=%zu "
1046 1.2 simonb "bufbytes=%zu bcount=%zu\n",
1047 1.2 simonb curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1048 1.2 simonb wl->wl_bufbytes, wl->wl_bcount));
1049 1.2 simonb #endif
1050 1.2 simonb
1051 1.65 riastrad /*
1052 1.65 riastrad * XXX this could be handled more gracefully, perhaps place
1053 1.65 riastrad * only a partial transaction in the log and allow the
1054 1.65 riastrad * remaining to flush without the protection of the journal.
1055 1.65 riastrad */
1056 1.67 riastrad KASSERTMSG((wapbl_transaction_len(wl) <=
1057 1.67 riastrad (wl->wl_circ_size - wl->wl_reserved_bytes)),
1058 1.65 riastrad "wapbl_end: current transaction too big to flush");
1059 1.40 bouyer
1060 1.2 simonb mutex_enter(&wl->wl_mtx);
1061 1.2 simonb KASSERT(wl->wl_lock_count > 0);
1062 1.2 simonb wl->wl_lock_count--;
1063 1.2 simonb mutex_exit(&wl->wl_mtx);
1064 1.2 simonb
1065 1.2 simonb rw_exit(&wl->wl_rwlock);
1066 1.2 simonb }
1067 1.2 simonb
1068 1.2 simonb void
1069 1.2 simonb wapbl_add_buf(struct wapbl *wl, struct buf * bp)
1070 1.2 simonb {
1071 1.2 simonb
1072 1.2 simonb KASSERT(bp->b_cflags & BC_BUSY);
1073 1.2 simonb KASSERT(bp->b_vp);
1074 1.2 simonb
1075 1.2 simonb wapbl_jlock_assert(wl);
1076 1.2 simonb
1077 1.2 simonb #if 0
1078 1.2 simonb /*
1079 1.2 simonb * XXX this might be an issue for swapfiles.
1080 1.2 simonb * see uvm_swap.c:1702
1081 1.2 simonb *
1082 1.2 simonb * XXX2 why require it then? leap of semantics?
1083 1.2 simonb */
1084 1.2 simonb KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
1085 1.2 simonb #endif
1086 1.2 simonb
1087 1.2 simonb mutex_enter(&wl->wl_mtx);
1088 1.2 simonb if (bp->b_flags & B_LOCKED) {
1089 1.2 simonb LIST_REMOVE(bp, b_wapbllist);
1090 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
1091 1.2 simonb ("wapbl_add_buf thread %d.%d re-adding buf %p "
1092 1.2 simonb "with %d bytes %d bcount\n",
1093 1.2 simonb curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
1094 1.2 simonb bp->b_bcount));
1095 1.2 simonb } else {
1096 1.2 simonb /* unlocked but dirty buffers shouldn't exist */
1097 1.2 simonb KASSERT(!(bp->b_oflags & BO_DELWRI));
1098 1.2 simonb wl->wl_bufbytes += bp->b_bufsize;
1099 1.2 simonb wl->wl_bcount += bp->b_bcount;
1100 1.2 simonb wl->wl_bufcount++;
1101 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
1102 1.2 simonb ("wapbl_add_buf thread %d.%d adding buf %p "
1103 1.2 simonb "with %d bytes %d bcount\n",
1104 1.2 simonb curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
1105 1.2 simonb bp->b_bcount));
1106 1.2 simonb }
1107 1.2 simonb LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
1108 1.2 simonb mutex_exit(&wl->wl_mtx);
1109 1.2 simonb
1110 1.2 simonb bp->b_flags |= B_LOCKED;
1111 1.2 simonb }
1112 1.2 simonb
1113 1.2 simonb static void
1114 1.2 simonb wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
1115 1.2 simonb {
1116 1.2 simonb
1117 1.2 simonb KASSERT(mutex_owned(&wl->wl_mtx));
1118 1.2 simonb KASSERT(bp->b_cflags & BC_BUSY);
1119 1.2 simonb wapbl_jlock_assert(wl);
1120 1.2 simonb
1121 1.2 simonb #if 0
1122 1.2 simonb /*
1123 1.2 simonb * XXX this might be an issue for swapfiles.
1124 1.2 simonb * see uvm_swap.c:1725
1125 1.2 simonb *
1126 1.2 simonb * XXXdeux: see above
1127 1.2 simonb */
1128 1.2 simonb KASSERT((bp->b_flags & BC_NOCACHE) == 0);
1129 1.2 simonb #endif
1130 1.2 simonb KASSERT(bp->b_flags & B_LOCKED);
1131 1.2 simonb
1132 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
1133 1.2 simonb ("wapbl_remove_buf thread %d.%d removing buf %p with "
1134 1.2 simonb "%d bytes %d bcount\n",
1135 1.2 simonb curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));
1136 1.2 simonb
1137 1.2 simonb KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
1138 1.2 simonb wl->wl_bufbytes -= bp->b_bufsize;
1139 1.2 simonb KASSERT(wl->wl_bcount >= bp->b_bcount);
1140 1.2 simonb wl->wl_bcount -= bp->b_bcount;
1141 1.2 simonb KASSERT(wl->wl_bufcount > 0);
1142 1.2 simonb wl->wl_bufcount--;
1143 1.2 simonb KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1144 1.2 simonb KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1145 1.2 simonb LIST_REMOVE(bp, b_wapbllist);
1146 1.2 simonb
1147 1.2 simonb bp->b_flags &= ~B_LOCKED;
1148 1.2 simonb }
1149 1.2 simonb
1150 1.2 simonb /* called from brelsel() in vfs_bio among other places */
1151 1.2 simonb void
1152 1.2 simonb wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
1153 1.2 simonb {
1154 1.2 simonb
1155 1.2 simonb mutex_enter(&wl->wl_mtx);
1156 1.2 simonb wapbl_remove_buf_locked(wl, bp);
1157 1.2 simonb mutex_exit(&wl->wl_mtx);
1158 1.2 simonb }
1159 1.2 simonb
1160 1.2 simonb void
1161 1.2 simonb wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
1162 1.2 simonb {
1163 1.2 simonb
1164 1.2 simonb KASSERT(bp->b_cflags & BC_BUSY);
1165 1.2 simonb
1166 1.2 simonb /*
1167 1.2 simonb * XXX: why does this depend on B_LOCKED? otherwise the buf
1168 1.2 simonb * is not for a transaction? if so, why is this called in the
1169 1.2 simonb * first place?
1170 1.2 simonb */
1171 1.2 simonb if (bp->b_flags & B_LOCKED) {
1172 1.2 simonb mutex_enter(&wl->wl_mtx);
1173 1.2 simonb wl->wl_bufbytes += bp->b_bufsize - oldsz;
1174 1.2 simonb wl->wl_bcount += bp->b_bcount - oldcnt;
1175 1.2 simonb mutex_exit(&wl->wl_mtx);
1176 1.2 simonb }
1177 1.2 simonb }
1178 1.2 simonb
1179 1.2 simonb #endif /* _KERNEL */
1180 1.2 simonb
1181 1.2 simonb /****************************************************************/
1182 1.2 simonb /* Some utility inlines */
1183 1.2 simonb
1184 1.71 riastrad /*
1185 1.71 riastrad * wapbl_space_used(avail, head, tail)
1186 1.71 riastrad *
1187 1.71 riastrad * Number of bytes used in a circular queue of avail total bytes,
1188 1.71 riastrad * from tail to head.
1189 1.71 riastrad */
1190 1.56 joerg static inline size_t
1191 1.56 joerg wapbl_space_used(size_t avail, off_t head, off_t tail)
1192 1.56 joerg {
1193 1.56 joerg
1194 1.56 joerg if (tail == 0) {
1195 1.56 joerg KASSERT(head == 0);
1196 1.56 joerg return 0;
1197 1.56 joerg }
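	/*
	 * For example (ignoring wl_circ_off for simplicity): with
	 * avail = 100, head = 20 and tail = 80 the used region wraps,
	 * and ((20 + 99 - 80) % 100) + 1 = 40 bytes are in use,
	 * i.e. [80, 100) plus [0, 20).
	 */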
1198 1.56 joerg return ((head + (avail - 1) - tail) % avail) + 1;
1199 1.56 joerg }
1200 1.56 joerg
1201 1.56 joerg #ifdef _KERNEL
1202 1.71 riastrad /*
1203 1.71 riastrad * wapbl_advance(size, off, oldoff, delta)
1204 1.71 riastrad *
1205 1.71 riastrad * Given a byte offset oldoff into a circular queue of size bytes
1206 1.71 riastrad * starting at off, return a new byte offset oldoff + delta into
1207 1.71 riastrad * the circular queue.
1208 1.71 riastrad */
1209 1.30 uebayasi static inline off_t
1210 1.60 matt wapbl_advance(size_t size, size_t off, off_t oldoff, size_t delta)
1211 1.2 simonb {
1212 1.60 matt off_t newoff;
1213 1.2 simonb
1214 1.2 simonb /* Define acceptable ranges for inputs. */
1215 1.46 christos KASSERT(delta <= (size_t)size);
1216 1.60 matt KASSERT((oldoff == 0) || ((size_t)oldoff >= off));
1217 1.60 matt KASSERT(oldoff < (off_t)(size + off));
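	/*
	 * For example, with size = 100 and off = 16 (so valid nonzero
	 * offsets lie in [16, 116)), advancing oldoff = 110 by
	 * delta = 20 wraps around: 110 + 20 = 130 >= 116, hence
	 * newoff = 130 - 100 = 30.
	 */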
1218 1.2 simonb
1219 1.60 matt if ((oldoff == 0) && (delta != 0))
1220 1.60 matt newoff = off + delta;
1221 1.60 matt else if ((oldoff + delta) < (size + off))
1222 1.60 matt newoff = oldoff + delta;
1223 1.2 simonb else
1224 1.60 matt newoff = (oldoff + delta) - size;
1225 1.2 simonb
1226 1.2 simonb /* Note some interesting axioms */
1227 1.60 matt KASSERT((delta != 0) || (newoff == oldoff));
1228 1.60 matt KASSERT((delta == 0) || (newoff != 0));
1229 1.60 matt KASSERT((delta != (size)) || (newoff == oldoff));
1230 1.2 simonb
1231 1.2 simonb /* Define acceptable ranges for output. */
1232 1.60 matt KASSERT((newoff == 0) || ((size_t)newoff >= off));
1233 1.60 matt KASSERT((size_t)newoff < (size + off));
1234 1.60 matt return newoff;
1235 1.2 simonb }
1236 1.2 simonb
1237 1.71 riastrad /*
1238 1.71 riastrad * wapbl_space_free(avail, head, tail)
1239 1.71 riastrad *
1240 1.71 riastrad * Number of bytes free in a circular queue of avail total bytes,
1241 1.71 riastrad * in which everything from tail to head is used.
1242 1.71 riastrad */
1243 1.30 uebayasi static inline size_t
1244 1.2 simonb wapbl_space_free(size_t avail, off_t head, off_t tail)
1245 1.2 simonb {
1246 1.2 simonb
1247 1.2 simonb return avail - wapbl_space_used(avail, head, tail);
1248 1.2 simonb }
1249 1.2 simonb
1250 1.71 riastrad /*
1251 1.71 riastrad * wapbl_advance_head(size, off, delta, headp, tailp)
1252 1.71 riastrad *
1253 1.71 riastrad * In a circular queue of size bytes starting at off, given the
1254 1.71 riastrad * old head and tail offsets *headp and *tailp, store the new head
1255 1.71 riastrad * and tail offsets in *headp and *tailp resulting from adding
1256 1.71 riastrad * delta bytes of data to the head.
1257 1.71 riastrad */
1258 1.30 uebayasi static inline void
1259 1.2 simonb wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
1260 1.2 simonb off_t *tailp)
1261 1.2 simonb {
1262 1.2 simonb off_t head = *headp;
1263 1.2 simonb off_t tail = *tailp;
1264 1.2 simonb
1265 1.2 simonb KASSERT(delta <= wapbl_space_free(size, head, tail));
1266 1.2 simonb head = wapbl_advance(size, off, head, delta);
1267 1.2 simonb if ((tail == 0) && (head != 0))
1268 1.2 simonb tail = off;
1269 1.2 simonb *headp = head;
1270 1.2 simonb *tailp = tail;
1271 1.2 simonb }
1272 1.2 simonb
1273 1.71 riastrad /*
1274 1.71 riastrad * wapbl_advance_tail(size, off, delta, headp, tailp)
1275 1.71 riastrad *
1276 1.71 riastrad * In a circular queue of size bytes starting at off, given the
1277 1.71 riastrad * old head and tail offsets *headp and *tailp, store the new head
1278 1.71 riastrad * and tail offsets in *headp and *tailp resulting from removing
1279 1.71 riastrad * delta bytes of data from the tail.
1280 1.71 riastrad */
1281 1.30 uebayasi static inline void
1282 1.2 simonb wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
1283 1.2 simonb off_t *tailp)
1284 1.2 simonb {
1285 1.2 simonb off_t head = *headp;
1286 1.2 simonb off_t tail = *tailp;
1287 1.2 simonb
1288 1.2 simonb KASSERT(delta <= wapbl_space_used(size, head, tail));
1289 1.2 simonb tail = wapbl_advance(size, off, tail, delta);
1290 1.2 simonb if (head == tail) {
1291 1.2 simonb head = tail = 0;
1292 1.2 simonb }
1293 1.2 simonb *headp = head;
1294 1.2 simonb *tailp = tail;
1295 1.2 simonb }
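/*
 * Note the 0 <-> off transitions in the two helpers above: adding data
 * to an empty queue (head == tail == 0) sets tail = off, and consuming
 * the last used byte resets head and tail to 0, matching the
 * "head == tail == 0 means log is empty" encoding documented in
 * struct wapbl.
 */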
1296 1.2 simonb
1297 1.2 simonb
1298 1.2 simonb /****************************************************************/
1299 1.2 simonb
1300 1.2 simonb /*
1301 1.73 riastrad * wapbl_truncate(wl, minfree)
1302 1.71 riastrad *
1303 1.71 riastrad * Wait until at least minfree bytes are available in the log.
1304 1.71 riastrad *
1305 1.73 riastrad * If it was necessary to wait for writes to complete,
1306 1.73 riastrad * advance the circular queue tail to reflect the new write
1307 1.73 riastrad * completions and issue a write commit to the log.
1308 1.71 riastrad *
1309 1.71 riastrad * => Caller must hold wl->wl_rwlock writer lock.
1310 1.2 simonb */
1311 1.2 simonb static int
1312 1.73 riastrad wapbl_truncate(struct wapbl *wl, size_t minfree)
1313 1.2 simonb {
1314 1.2 simonb size_t delta;
1315 1.2 simonb size_t avail;
1316 1.2 simonb off_t head;
1317 1.2 simonb off_t tail;
1318 1.2 simonb int error = 0;
1319 1.2 simonb
1320 1.2 simonb KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
1321 1.2 simonb KASSERT(rw_write_held(&wl->wl_rwlock));
1322 1.2 simonb
1323 1.2 simonb mutex_enter(&wl->wl_mtx);
1324 1.2 simonb
1325 1.2 simonb /*
1326 1.2 simonb * First check to see if we have to do a commit
1327 1.2 simonb * at all.
1328 1.2 simonb */
1329 1.2 simonb avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
1330 1.2 simonb if (minfree < avail) {
1331 1.2 simonb mutex_exit(&wl->wl_mtx);
1332 1.2 simonb return 0;
1333 1.2 simonb }
1334 1.2 simonb minfree -= avail;
1335 1.2 simonb while ((wl->wl_error_count == 0) &&
1336 1.2 simonb (wl->wl_reclaimable_bytes < minfree)) {
1337 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1338 1.2 simonb ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
1339 1.2 simonb "minfree=%zd\n",
1340 1.2 simonb &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
1341 1.2 simonb minfree));
1342 1.2 simonb
1343 1.2 simonb cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
1344 1.2 simonb }
1345 1.2 simonb if (wl->wl_reclaimable_bytes < minfree) {
1346 1.2 simonb KASSERT(wl->wl_error_count);
1347 1.2 simonb /* XXX maybe get actual error from buffer instead someday? */
1348 1.2 simonb error = EIO;
1349 1.2 simonb }
1350 1.2 simonb head = wl->wl_head;
1351 1.2 simonb tail = wl->wl_tail;
1352 1.2 simonb delta = wl->wl_reclaimable_bytes;
1353 1.2 simonb
1354 1.2 simonb /* If all of the entries are flushed, then be sure to keep
1355 1.2 simonb * the reserved bytes reserved. Watch out for discarded transactions,
1356 1.2 simonb * which could leave more bytes reserved than are reclaimable.
1357 1.2 simonb */
1358 1.2 simonb if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
1359 1.2 simonb (delta >= wl->wl_reserved_bytes)) {
1360 1.2 simonb delta -= wl->wl_reserved_bytes;
1361 1.2 simonb }
1362 1.2 simonb wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
1363 1.2 simonb &tail);
1364 1.2 simonb KDASSERT(wl->wl_reserved_bytes <=
1365 1.2 simonb wapbl_space_used(wl->wl_circ_size, head, tail));
1366 1.2 simonb mutex_exit(&wl->wl_mtx);
1367 1.2 simonb
1368 1.2 simonb if (error)
1369 1.2 simonb return error;
1370 1.2 simonb
1371 1.2 simonb /*
1372 1.2 simonb 	 * This is where head, tail and delta are unprotected
1373 1.2 simonb 	 * from races against itself or against flush.  This is ok
1374 1.2 simonb 	 * since we only call this routine from inside flush itself.
1375 1.2 simonb *
1376 1.2 simonb * XXX: how can it race against itself when accessed only
1377 1.2 simonb * from behind the write-locked rwlock?
1378 1.2 simonb */
1379 1.2 simonb error = wapbl_write_commit(wl, head, tail);
1380 1.2 simonb if (error)
1381 1.2 simonb return error;
1382 1.2 simonb
1383 1.2 simonb wl->wl_head = head;
1384 1.2 simonb wl->wl_tail = tail;
1385 1.2 simonb
1386 1.2 simonb mutex_enter(&wl->wl_mtx);
1387 1.2 simonb KASSERT(wl->wl_reclaimable_bytes >= delta);
1388 1.2 simonb wl->wl_reclaimable_bytes -= delta;
1389 1.2 simonb mutex_exit(&wl->wl_mtx);
1390 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1391 1.2 simonb ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
1392 1.2 simonb curproc->p_pid, curlwp->l_lid, delta));
1393 1.2 simonb
1394 1.2 simonb return 0;
1395 1.2 simonb }
1396 1.2 simonb
1397 1.2 simonb /****************************************************************/
1398 1.2 simonb
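/*
 * wapbl_biodone(bp)
 *
 * Completion callback for an asynchronous journalled buffer write
 * issued by wapbl_flush().  Drop the buffer's count against its
 * transaction entry; once the oldest entries have no outstanding
 * buffers left, mark their log space reclaimable and wake anyone
 * waiting in wapbl_truncate().
 */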
1399 1.2 simonb void
1400 1.2 simonb wapbl_biodone(struct buf *bp)
1401 1.2 simonb {
1402 1.2 simonb struct wapbl_entry *we = bp->b_private;
1403 1.2 simonb struct wapbl *wl = we->we_wapbl;
1404 1.53 hannken #ifdef WAPBL_DEBUG_BUFBYTES
1405 1.53 hannken const int bufsize = bp->b_bufsize;
1406 1.53 hannken #endif
1407 1.2 simonb
1408 1.2 simonb /*
1409 1.2 simonb 	 * Handle possible flushing of buffers after the log has been
1410 1.2 simonb 	 * decommissioned.
1411 1.2 simonb */
1412 1.2 simonb if (!wl) {
1413 1.2 simonb KASSERT(we->we_bufcount > 0);
1414 1.2 simonb we->we_bufcount--;
1415 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1416 1.53 hannken KASSERT(we->we_unsynced_bufbytes >= bufsize);
1417 1.53 hannken we->we_unsynced_bufbytes -= bufsize;
1418 1.2 simonb #endif
1419 1.2 simonb
1420 1.2 simonb if (we->we_bufcount == 0) {
1421 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1422 1.2 simonb KASSERT(we->we_unsynced_bufbytes == 0);
1423 1.2 simonb #endif
1424 1.51 para pool_put(&wapbl_entry_pool, we);
1425 1.2 simonb }
1426 1.2 simonb
1427 1.2 simonb brelse(bp, 0);
1428 1.2 simonb return;
1429 1.2 simonb }
1430 1.2 simonb
1431 1.2 simonb #ifdef ohbother
1432 1.44 uebayasi KDASSERT(bp->b_oflags & BO_DONE);
1433 1.44 uebayasi KDASSERT(!(bp->b_oflags & BO_DELWRI));
1434 1.2 simonb KDASSERT(bp->b_flags & B_ASYNC);
1435 1.44 uebayasi KDASSERT(bp->b_cflags & BC_BUSY);
1436 1.2 simonb KDASSERT(!(bp->b_flags & B_LOCKED));
1437 1.2 simonb KDASSERT(!(bp->b_flags & B_READ));
1438 1.44 uebayasi KDASSERT(!(bp->b_cflags & BC_INVAL));
1439 1.44 uebayasi KDASSERT(!(bp->b_cflags & BC_NOCACHE));
1440 1.2 simonb #endif
1441 1.2 simonb
1442 1.2 simonb if (bp->b_error) {
1443 1.26 apb /*
1444 1.78 riastrad * If an error occurs, it would be nice to leave the buffer
1445 1.78 riastrad * as a delayed write on the LRU queue so that we can retry
1446 1.78 riastrad * it later. But buffercache(9) can't handle dirty buffer
1447 1.78 riastrad * reuse, so just mark the log permanently errored out.
1448 1.26 apb */
1449 1.2 simonb mutex_enter(&wl->wl_mtx);
1450 1.2 simonb if (wl->wl_error_count == 0) {
1451 1.2 simonb wl->wl_error_count++;
1452 1.2 simonb cv_broadcast(&wl->wl_reclaimable_cv);
1453 1.2 simonb }
1454 1.2 simonb mutex_exit(&wl->wl_mtx);
1455 1.2 simonb }
1456 1.2 simonb
1457 1.53 hannken /*
1458 1.53 hannken * Release the buffer here. wapbl_flush() may wait for the
1459 1.53 hannken 	 * log to become empty and we had better unbusy the buffer
1460 1.53 hannken 	 * before wapbl_flush() returns.
1461 1.53 hannken */
1462 1.53 hannken brelse(bp, 0);
1463 1.53 hannken
1464 1.2 simonb mutex_enter(&wl->wl_mtx);
1465 1.2 simonb
1466 1.2 simonb KASSERT(we->we_bufcount > 0);
1467 1.2 simonb we->we_bufcount--;
1468 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1469 1.53 hannken KASSERT(we->we_unsynced_bufbytes >= bufsize);
1470 1.53 hannken we->we_unsynced_bufbytes -= bufsize;
1471 1.53 hannken KASSERT(wl->wl_unsynced_bufbytes >= bufsize);
1472 1.53 hannken wl->wl_unsynced_bufbytes -= bufsize;
1473 1.2 simonb #endif
1474 1.2 simonb
1475 1.2 simonb /*
1476 1.2 simonb * If the current transaction can be reclaimed, start
1477 1.2 simonb * at the beginning and reclaim any consecutive reclaimable
1478 1.2 simonb * transactions. If we successfully reclaim anything,
1479 1.2 simonb 	 * then wake up anyone waiting for the reclaim.
1480 1.2 simonb */
1481 1.2 simonb if (we->we_bufcount == 0) {
1482 1.2 simonb size_t delta = 0;
1483 1.2 simonb int errcnt = 0;
1484 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1485 1.2 simonb KDASSERT(we->we_unsynced_bufbytes == 0);
1486 1.2 simonb #endif
1487 1.2 simonb /*
1488 1.2 simonb 		 * Clear any posted error, since the buffer it came from
1489 1.2 simonb 		 * has been successfully flushed by now.
1490 1.2 simonb */
1491 1.2 simonb while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
1492 1.2 simonb (we->we_bufcount == 0)) {
1493 1.2 simonb delta += we->we_reclaimable_bytes;
1494 1.2 simonb if (we->we_error)
1495 1.2 simonb errcnt++;
1496 1.2 simonb SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
1497 1.51 para pool_put(&wapbl_entry_pool, we);
1498 1.2 simonb }
1499 1.2 simonb
1500 1.2 simonb if (delta) {
1501 1.2 simonb wl->wl_reclaimable_bytes += delta;
1502 1.2 simonb KASSERT(wl->wl_error_count >= errcnt);
1503 1.2 simonb wl->wl_error_count -= errcnt;
1504 1.2 simonb cv_broadcast(&wl->wl_reclaimable_cv);
1505 1.2 simonb }
1506 1.2 simonb }
1507 1.2 simonb
1508 1.2 simonb mutex_exit(&wl->wl_mtx);
1509 1.2 simonb }
1510 1.2 simonb
1511 1.2 simonb /*
1512 1.71 riastrad * wapbl_flush(wl, wait)
1513 1.71 riastrad *
1514 1.71 riastrad * Flush pending block writes, deallocations, and inodes from
1515 1.71 riastrad * the current transaction in memory to the log on disk:
1516 1.71 riastrad *
1517 1.71 riastrad * 1. Call the file system's wl_flush callback to flush any
1518 1.71 riastrad * per-file-system pending updates.
1519 1.71 riastrad * 2. Wait for enough space in the log for the current transaction.
1520 1.71 riastrad * 3. Synchronously write the new log records, advancing the
1521 1.71 riastrad * circular queue head.
1522 1.77 riastrad * 4. Issue the pending block writes asynchronously, now that they
1523 1.77 riastrad * are recorded in the log and can be replayed after crash.
1524 1.77 riastrad * 5. If wait is true, wait for all writes to complete and for the
1525 1.77 riastrad * log to become empty.
1526 1.71 riastrad *
1527 1.71 riastrad * On failure, call the file system's wl_flush_abort callback.
1528 1.2 simonb */
1529 1.2 simonb int
1530 1.2 simonb wapbl_flush(struct wapbl *wl, int waitfor)
1531 1.2 simonb {
1532 1.2 simonb struct buf *bp;
1533 1.2 simonb struct wapbl_entry *we;
1534 1.2 simonb off_t off;
1535 1.2 simonb off_t head;
1536 1.2 simonb off_t tail;
1537 1.2 simonb size_t delta = 0;
1538 1.2 simonb size_t flushsize;
1539 1.2 simonb size_t reserved;
1540 1.2 simonb int error = 0;
1541 1.2 simonb
1542 1.2 simonb /*
1543 1.2 simonb 	 * Do a quick check to see if a full flush can be skipped.
1544 1.2 simonb 	 * This assumes that the flush callback does not need to be called
1545 1.2 simonb * unless there are other outstanding bufs.
1546 1.2 simonb */
1547 1.2 simonb if (!waitfor) {
1548 1.2 simonb size_t nbufs;
1549 1.2 simonb 		/* XXX need mutex here to protect the KASSERTs */
1550 1.2 simonb 		mutex_enter(&wl->wl_mtx);
1551 1.2 simonb nbufs = wl->wl_bufcount;
1552 1.2 simonb KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1553 1.2 simonb KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1554 1.2 simonb mutex_exit(&wl->wl_mtx);
1555 1.2 simonb if (nbufs == 0)
1556 1.2 simonb return 0;
1557 1.2 simonb }
1558 1.2 simonb
1559 1.2 simonb /*
1560 1.2 simonb * XXX we may consider using LK_UPGRADE here
1561 1.2 simonb * if we want to call flush from inside a transaction
1562 1.2 simonb */
1563 1.2 simonb rw_enter(&wl->wl_rwlock, RW_WRITER);
1564 1.2 simonb wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
1565 1.2 simonb wl->wl_dealloccnt);
1566 1.2 simonb
1567 1.2 simonb /*
1568 1.75 riastrad * Now that we are exclusively locked and the file system has
1569 1.75 riastrad * issued any deferred block writes for this transaction, check
1570 1.75 riastrad * whether there are any blocks to write to the log. If not,
1571 1.75 riastrad * skip waiting for space or writing any log entries.
1572 1.75 riastrad *
1573 1.75 riastrad * XXX Shouldn't this also check wl_dealloccnt and
1574 1.75 riastrad * wl_inohashcnt? Perhaps wl_dealloccnt doesn't matter if the
1575 1.75 riastrad * file system didn't produce any blocks as a consequence of
1576 1.75 riastrad * it, but the same does not seem to be so of wl_inohashcnt.
1577 1.2 simonb */
1578 1.2 simonb if (wl->wl_bufcount == 0) {
1579 1.69 riastrad goto wait_out;
1580 1.2 simonb }
1581 1.2 simonb
1582 1.2 simonb #if 0
1583 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1584 1.2 simonb ("wapbl_flush thread %d.%d flushing entries with "
1585 1.2 simonb "bufcount=%zu bufbytes=%zu\n",
1586 1.2 simonb curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1587 1.2 simonb wl->wl_bufbytes));
1588 1.2 simonb #endif
1589 1.2 simonb
1590 1.2 simonb /* Calculate amount of space needed to flush */
1591 1.2 simonb flushsize = wapbl_transaction_len(wl);
1592 1.39 christos if (wapbl_verbose_commit) {
1593 1.39 christos struct timespec ts;
1594 1.39 christos getnanotime(&ts);
1595 1.43 nakayama printf("%s: %lld.%09ld this transaction = %zu bytes\n",
1596 1.39 christos __func__, (long long)ts.tv_sec,
1597 1.39 christos (long)ts.tv_nsec, flushsize);
1598 1.39 christos }
1599 1.2 simonb
1600 1.2 simonb if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1601 1.2 simonb /*
1602 1.2 simonb * XXX this could be handled more gracefully, perhaps place
1603 1.2 simonb 		 * only a partial transaction in the log and allow the
1604 1.2 simonb 		 * remainder to flush without the protection of the journal.
1605 1.2 simonb */
1606 1.66 riastrad panic("wapbl_flush: current transaction too big to flush");
1607 1.2 simonb }
1608 1.2 simonb
1609 1.73 riastrad error = wapbl_truncate(wl, flushsize);
1610 1.2 simonb if (error)
1611 1.69 riastrad goto out;
1612 1.2 simonb
1613 1.2 simonb off = wl->wl_head;
1614 1.70 riastrad KASSERT((off == 0) || (off >= wl->wl_circ_off));
1615 1.70 riastrad KASSERT((off == 0) || (off < wl->wl_circ_off + wl->wl_circ_size));
1616 1.2 simonb error = wapbl_write_blocks(wl, &off);
1617 1.2 simonb if (error)
1618 1.69 riastrad goto out;
1619 1.2 simonb error = wapbl_write_revocations(wl, &off);
1620 1.2 simonb if (error)
1621 1.69 riastrad goto out;
1622 1.2 simonb error = wapbl_write_inodes(wl, &off);
1623 1.2 simonb if (error)
1624 1.69 riastrad goto out;
1625 1.2 simonb
1626 1.2 simonb reserved = 0;
1627 1.2 simonb if (wl->wl_inohashcnt)
1628 1.2 simonb reserved = wapbl_transaction_inodes_len(wl);
1629 1.2 simonb
1630 1.2 simonb head = wl->wl_head;
1631 1.2 simonb tail = wl->wl_tail;
1632 1.2 simonb
1633 1.2 simonb wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1634 1.2 simonb &head, &tail);
1635 1.72 riastrad
1636 1.72 riastrad KASSERTMSG(head == off,
1637 1.72 riastrad "lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1638 1.72 riastrad " off=%"PRIdMAX" flush=%zu",
1639 1.72 riastrad (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1640 1.72 riastrad flushsize);
1641 1.2 simonb
1642 1.2 simonb /* Opportunistically move the tail forward if we can */
1643 1.73 riastrad mutex_enter(&wl->wl_mtx);
1644 1.73 riastrad delta = wl->wl_reclaimable_bytes;
1645 1.73 riastrad mutex_exit(&wl->wl_mtx);
1646 1.73 riastrad wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1647 1.73 riastrad &head, &tail);
1648 1.2 simonb
1649 1.2 simonb error = wapbl_write_commit(wl, head, tail);
1650 1.2 simonb if (error)
1651 1.69 riastrad goto out;
1652 1.2 simonb
1653 1.51 para we = pool_get(&wapbl_entry_pool, PR_WAITOK);
1654 1.2 simonb
1655 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1656 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1657 1.2 simonb ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1658 1.2 simonb " unsynced=%zu"
1659 1.2 simonb "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1660 1.2 simonb "inodes=%d\n",
1661 1.2 simonb curproc->p_pid, curlwp->l_lid, flushsize, delta,
1662 1.2 simonb wapbl_space_used(wl->wl_circ_size, head, tail),
1663 1.2 simonb wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1664 1.2 simonb wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1665 1.2 simonb wl->wl_inohashcnt));
1666 1.2 simonb #else
1667 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1668 1.2 simonb ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1669 1.2 simonb "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1670 1.2 simonb "inodes=%d\n",
1671 1.2 simonb curproc->p_pid, curlwp->l_lid, flushsize, delta,
1672 1.2 simonb wapbl_space_used(wl->wl_circ_size, head, tail),
1673 1.2 simonb wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1674 1.2 simonb wl->wl_dealloccnt, wl->wl_inohashcnt));
1675 1.2 simonb #endif
1676 1.2 simonb
1677 1.2 simonb
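	/*
	 * The commit is on disk.  Update the in-memory log state under
	 * the locks, queue a transaction entry so wapbl_biodone() can
	 * account for the buffer writes, and then issue those writes
	 * asynchronously.
	 */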
1678 1.2 simonb mutex_enter(&bufcache_lock);
1679 1.2 simonb mutex_enter(&wl->wl_mtx);
1680 1.2 simonb
1681 1.2 simonb wl->wl_reserved_bytes = reserved;
1682 1.2 simonb wl->wl_head = head;
1683 1.2 simonb wl->wl_tail = tail;
1684 1.2 simonb KASSERT(wl->wl_reclaimable_bytes >= delta);
1685 1.2 simonb wl->wl_reclaimable_bytes -= delta;
1686 1.2 simonb wl->wl_dealloccnt = 0;
1687 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1688 1.2 simonb wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1689 1.2 simonb #endif
1690 1.2 simonb
1691 1.2 simonb we->we_wapbl = wl;
1692 1.2 simonb we->we_bufcount = wl->wl_bufcount;
1693 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1694 1.2 simonb we->we_unsynced_bufbytes = wl->wl_bufbytes;
1695 1.2 simonb #endif
1696 1.2 simonb we->we_reclaimable_bytes = flushsize;
1697 1.2 simonb we->we_error = 0;
1698 1.2 simonb SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1699 1.2 simonb
1700 1.2 simonb /*
1701 1.2 simonb 	 * This flushes bufs in the reverse order from which they were
1702 1.2 simonb 	 * queued; it shouldn't matter, but if we care we could use a
1703 1.2 simonb 	 * TAILQ instead.  XXX Note they will get put on the LRU queue
1704 1.2 simonb 	 * when they flush, so we might want to change this to preserve order.
1705 1.2 simonb */
1706 1.2 simonb while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
1707 1.2 simonb if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1708 1.2 simonb continue;
1709 1.2 simonb }
1710 1.2 simonb bp->b_iodone = wapbl_biodone;
1711 1.2 simonb bp->b_private = we;
1712 1.2 simonb bremfree(bp);
1713 1.2 simonb wapbl_remove_buf_locked(wl, bp);
1714 1.2 simonb mutex_exit(&wl->wl_mtx);
1715 1.2 simonb mutex_exit(&bufcache_lock);
1716 1.2 simonb bawrite(bp);
1717 1.2 simonb mutex_enter(&bufcache_lock);
1718 1.2 simonb mutex_enter(&wl->wl_mtx);
1719 1.2 simonb }
1720 1.2 simonb mutex_exit(&wl->wl_mtx);
1721 1.2 simonb mutex_exit(&bufcache_lock);
1722 1.2 simonb
1723 1.2 simonb #if 0
1724 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1725 1.2 simonb ("wapbl_flush thread %d.%d done flushing entries...\n",
1726 1.2 simonb curproc->p_pid, curlwp->l_lid));
1727 1.2 simonb #endif
1728 1.2 simonb
1729 1.69 riastrad wait_out:
1730 1.2 simonb
1731 1.2 simonb /*
1732 1.2 simonb * If the waitfor flag is set, don't return until everything is
1733 1.2 simonb * fully flushed and the on disk log is empty.
1734 1.2 simonb */
1735 1.2 simonb if (waitfor) {
1736 1.2 simonb error = wapbl_truncate(wl, wl->wl_circ_size -
1737 1.73 riastrad wl->wl_reserved_bytes);
1738 1.2 simonb }
1739 1.2 simonb
1740 1.69 riastrad out:
1741 1.2 simonb if (error) {
1742 1.2 simonb wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
1743 1.2 simonb wl->wl_dealloclens, wl->wl_dealloccnt);
1744 1.2 simonb }
1745 1.2 simonb
1746 1.2 simonb #ifdef WAPBL_DEBUG_PRINT
1747 1.2 simonb if (error) {
1748 1.2 simonb pid_t pid = -1;
1749 1.2 simonb lwpid_t lid = -1;
1750 1.2 simonb if (curproc)
1751 1.2 simonb pid = curproc->p_pid;
1752 1.2 simonb if (curlwp)
1753 1.2 simonb lid = curlwp->l_lid;
1754 1.2 simonb mutex_enter(&wl->wl_mtx);
1755 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1756 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1757 1.2 simonb ("wapbl_flush: thread %d.%d aborted flush: "
1758 1.2 simonb "error = %d\n"
1759 1.2 simonb "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1760 1.2 simonb "deallocs=%d inodes=%d\n"
1761 1.2 simonb "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1762 1.2 simonb "unsynced=%zu\n",
1763 1.2 simonb pid, lid, error, wl->wl_bufcount,
1764 1.2 simonb wl->wl_bufbytes, wl->wl_bcount,
1765 1.2 simonb wl->wl_dealloccnt, wl->wl_inohashcnt,
1766 1.2 simonb wl->wl_error_count, wl->wl_reclaimable_bytes,
1767 1.2 simonb wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1768 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1769 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1770 1.2 simonb ("\tentry: bufcount = %zu, reclaimable = %zu, "
1771 1.2 simonb "error = %d, unsynced = %zu\n",
1772 1.2 simonb we->we_bufcount, we->we_reclaimable_bytes,
1773 1.2 simonb we->we_error, we->we_unsynced_bufbytes));
1774 1.2 simonb }
1775 1.2 simonb #else
1776 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1777 1.2 simonb ("wapbl_flush: thread %d.%d aborted flush: "
1778 1.2 simonb "error = %d\n"
1779 1.2 simonb "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1780 1.2 simonb "deallocs=%d inodes=%d\n"
1781 1.2 simonb "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
1782 1.2 simonb pid, lid, error, wl->wl_bufcount,
1783 1.2 simonb wl->wl_bufbytes, wl->wl_bcount,
1784 1.2 simonb wl->wl_dealloccnt, wl->wl_inohashcnt,
1785 1.2 simonb wl->wl_error_count, wl->wl_reclaimable_bytes,
1786 1.2 simonb wl->wl_reserved_bytes));
1787 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1788 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1789 1.2 simonb ("\tentry: bufcount = %zu, reclaimable = %zu, "
1790 1.2 simonb "error = %d\n", we->we_bufcount,
1791 1.2 simonb we->we_reclaimable_bytes, we->we_error));
1792 1.2 simonb }
1793 1.2 simonb #endif
1794 1.2 simonb mutex_exit(&wl->wl_mtx);
1795 1.2 simonb }
1796 1.2 simonb #endif
1797 1.2 simonb
1798 1.2 simonb rw_exit(&wl->wl_rwlock);
1799 1.2 simonb return error;
1800 1.2 simonb }
1801 1.2 simonb
1802 1.2 simonb /****************************************************************/
1803 1.2 simonb
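/*
 * wapbl_jlock_assert(wl), wapbl_junlock_assert(wl)
 *
 * Assert that the journal rwlock is held (as reader or writer),
 * respectively that the caller does not hold it as writer.
 */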
1804 1.2 simonb void
1805 1.2 simonb wapbl_jlock_assert(struct wapbl *wl)
1806 1.2 simonb {
1807 1.2 simonb
1808 1.23 ad KASSERT(rw_lock_held(&wl->wl_rwlock));
1809 1.2 simonb }
1810 1.2 simonb
1811 1.2 simonb void
1812 1.2 simonb wapbl_junlock_assert(struct wapbl *wl)
1813 1.2 simonb {
1814 1.2 simonb
1815 1.2 simonb KASSERT(!rw_write_held(&wl->wl_rwlock));
1816 1.2 simonb }
1817 1.2 simonb
1818 1.2 simonb /****************************************************************/
1819 1.2 simonb
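/*
 * wapbl_print(wl, full, pr)
 *
 * Print a summary of the state of wl with the printf-like function
 * pr.  If full is nonzero, also list the queued buffers, pending
 * deallocations, and registered inodes.
 */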
1820 1.2 simonb /* locks missing */
1821 1.2 simonb void
1822 1.2 simonb wapbl_print(struct wapbl *wl,
1823 1.2 simonb int full,
1824 1.2 simonb void (*pr)(const char *, ...))
1825 1.2 simonb {
1826 1.2 simonb struct buf *bp;
1827 1.2 simonb struct wapbl_entry *we;
1828 1.2 simonb (*pr)("wapbl %p", wl);
1829 1.2 simonb (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
1830 1.2 simonb wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
1831 1.2 simonb (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
1832 1.2 simonb wl->wl_circ_size, wl->wl_circ_off,
1833 1.2 simonb (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
1834 1.2 simonb (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
1835 1.2 simonb wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
1836 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1837 1.2 simonb (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1838 1.2 simonb "reserved = %zu errcnt = %d unsynced = %zu\n",
1839 1.2 simonb wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1840 1.2 simonb wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1841 1.2 simonb wl->wl_error_count, wl->wl_unsynced_bufbytes);
1842 1.2 simonb #else
1843 1.2 simonb (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1844 1.2 simonb "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
1845 1.2 simonb wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1846 1.2 simonb wl->wl_error_count);
1847 1.2 simonb #endif
1848 1.2 simonb (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
1849 1.2 simonb wl->wl_dealloccnt, wl->wl_dealloclim);
1850 1.2 simonb (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
1851 1.2 simonb wl->wl_inohashcnt, wl->wl_inohashmask);
1852 1.2 simonb (*pr)("entries:\n");
1853 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1854 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1855 1.2 simonb (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
1856 1.2 simonb "unsynced = %zu\n",
1857 1.2 simonb we->we_bufcount, we->we_reclaimable_bytes,
1858 1.2 simonb we->we_error, we->we_unsynced_bufbytes);
1859 1.2 simonb #else
1860 1.2 simonb (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
1861 1.2 simonb we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
1862 1.2 simonb #endif
1863 1.2 simonb }
1864 1.2 simonb if (full) {
1865 1.2 simonb int cnt = 0;
1866 1.2 simonb (*pr)("bufs =");
1867 1.2 simonb LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
1868 1.2 simonb if (!LIST_NEXT(bp, b_wapbllist)) {
1869 1.2 simonb (*pr)(" %p", bp);
1870 1.2 simonb } else if ((++cnt % 6) == 0) {
1871 1.2 simonb (*pr)(" %p,\n\t", bp);
1872 1.2 simonb } else {
1873 1.2 simonb (*pr)(" %p,", bp);
1874 1.2 simonb }
1875 1.2 simonb }
1876 1.2 simonb (*pr)("\n");
1877 1.2 simonb
1878 1.2 simonb (*pr)("dealloced blks = ");
1879 1.2 simonb {
1880 1.2 simonb int i;
1881 1.2 simonb cnt = 0;
1882 1.2 simonb for (i = 0; i < wl->wl_dealloccnt; i++) {
1883 1.2 simonb (*pr)(" %"PRId64":%d,",
1884 1.2 simonb wl->wl_deallocblks[i],
1885 1.2 simonb wl->wl_dealloclens[i]);
1886 1.2 simonb if ((++cnt % 4) == 0) {
1887 1.2 simonb (*pr)("\n\t");
1888 1.2 simonb }
1889 1.2 simonb }
1890 1.2 simonb }
1891 1.2 simonb (*pr)("\n");
1892 1.2 simonb
1893 1.2 simonb (*pr)("registered inodes = ");
1894 1.2 simonb {
1895 1.2 simonb int i;
1896 1.2 simonb cnt = 0;
1897 1.2 simonb for (i = 0; i <= wl->wl_inohashmask; i++) {
1898 1.2 simonb struct wapbl_ino_head *wih;
1899 1.2 simonb struct wapbl_ino *wi;
1900 1.2 simonb
1901 1.2 simonb wih = &wl->wl_inohash[i];
1902 1.2 simonb LIST_FOREACH(wi, wih, wi_hash) {
1903 1.2 simonb if (wi->wi_ino == 0)
1904 1.2 simonb continue;
1905 1.55 christos (*pr)(" %"PRIu64"/0%06"PRIo32",",
1906 1.2 simonb wi->wi_ino, wi->wi_mode);
1907 1.2 simonb if ((++cnt % 4) == 0) {
1908 1.2 simonb (*pr)("\n\t");
1909 1.2 simonb }
1910 1.2 simonb }
1911 1.2 simonb }
1912 1.2 simonb (*pr)("\n");
1913 1.2 simonb }
1914 1.2 simonb }
1915 1.2 simonb }
1916 1.2 simonb
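/*
 * wapbl_dump(wl)
 *
 * Debugger helper: dump the full state of wl, or of wapbl_debug_wl
 * if wl is NULL and WAPBL_DEBUG is defined.
 */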
1917 1.2 simonb #if defined(WAPBL_DEBUG) || defined(DDB)
1918 1.2 simonb void
1919 1.2 simonb wapbl_dump(struct wapbl *wl)
1920 1.2 simonb {
1921 1.2 simonb #if defined(WAPBL_DEBUG)
1922 1.2 simonb if (!wl)
1923 1.2 simonb wl = wapbl_debug_wl;
1924 1.2 simonb #endif
1925 1.2 simonb if (!wl)
1926 1.2 simonb return;
1927 1.2 simonb wapbl_print(wl, 1, printf);
1928 1.2 simonb }
1929 1.2 simonb #endif
1930 1.2 simonb
1931 1.2 simonb /****************************************************************/
1932 1.2 simonb
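/*
 * wapbl_register_deallocation(wl, blk, len)
 *
 * Record that len bytes at physical block blk are freed by the
 * current transaction; the deallocations are written to the log as
 * revocation records at commit time.
 */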
1933 1.2 simonb void
1934 1.2 simonb wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
1935 1.2 simonb {
1936 1.2 simonb
1937 1.2 simonb wapbl_jlock_assert(wl);
1938 1.2 simonb
1939 1.38 hannken mutex_enter(&wl->wl_mtx);
1940 1.2 simonb /* XXX should eventually instead tie this into resource estimation */
1941 1.27 pooka /*
1942 1.27 pooka * XXX this panic needs locking/mutex analysis and the
1943 1.27 pooka * ability to cope with the failure.
1944 1.27 pooka */
1945 1.27 pooka /* XXX this XXX doesn't have enough XXX */
1946 1.27 pooka if (__predict_false(wl->wl_dealloccnt >= wl->wl_dealloclim))
1947 1.27 pooka panic("wapbl_register_deallocation: out of resources");
1948 1.27 pooka
1949 1.2 simonb wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
1950 1.2 simonb wl->wl_dealloclens[wl->wl_dealloccnt] = len;
1951 1.2 simonb wl->wl_dealloccnt++;
1952 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
1953 1.2 simonb ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
1954 1.38 hannken mutex_exit(&wl->wl_mtx);
1955 1.2 simonb }
1956 1.2 simonb
1957 1.2 simonb /****************************************************************/
1958 1.2 simonb
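/*
 * Inode tracking: a per-log hash table of inode number/mode pairs
 * registered by the file system, backed by a pool shared among all
 * logs.  The registered inodes are written to the log by
 * wapbl_write_inodes() at commit time.
 */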
1959 1.2 simonb static void
1960 1.2 simonb wapbl_inodetrk_init(struct wapbl *wl, u_int size)
1961 1.2 simonb {
1962 1.2 simonb
1963 1.2 simonb wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
1964 1.2 simonb if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
1965 1.2 simonb pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
1966 1.2 simonb "wapblinopl", &pool_allocator_nointr, IPL_NONE);
1967 1.2 simonb }
1968 1.2 simonb }
1969 1.2 simonb
1970 1.2 simonb static void
1971 1.2 simonb wapbl_inodetrk_free(struct wapbl *wl)
1972 1.2 simonb {
1973 1.2 simonb
1974 1.2 simonb /* XXX this KASSERT needs locking/mutex analysis */
1975 1.2 simonb KASSERT(wl->wl_inohashcnt == 0);
1976 1.2 simonb hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
1977 1.2 simonb if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
1978 1.2 simonb pool_destroy(&wapbl_ino_pool);
1979 1.2 simonb }
1980 1.2 simonb }
1981 1.2 simonb
1982 1.2 simonb static struct wapbl_ino *
1983 1.2 simonb wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
1984 1.2 simonb {
1985 1.2 simonb struct wapbl_ino_head *wih;
1986 1.2 simonb struct wapbl_ino *wi;
1987 1.2 simonb
1988 1.2 simonb KASSERT(mutex_owned(&wl->wl_mtx));
1989 1.2 simonb
1990 1.2 simonb wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1991 1.2 simonb LIST_FOREACH(wi, wih, wi_hash) {
1992 1.2 simonb if (ino == wi->wi_ino)
1993 1.2 simonb return wi;
1994 1.2 simonb }
1995 1.2 simonb return 0;
1996 1.2 simonb }
1997 1.2 simonb
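/*
 * wapbl_register_inode(wl, ino, mode)
 *
 * Add ino with mode to the in-memory inode hash if it is not
 * already registered; it will be included in the inode records
 * written at the next commit.
 */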
1998 1.2 simonb void
1999 1.2 simonb wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2000 1.2 simonb {
2001 1.2 simonb struct wapbl_ino_head *wih;
2002 1.2 simonb struct wapbl_ino *wi;
2003 1.2 simonb
2004 1.2 simonb wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
2005 1.2 simonb
2006 1.2 simonb mutex_enter(&wl->wl_mtx);
2007 1.2 simonb if (wapbl_inodetrk_get(wl, ino) == NULL) {
2008 1.2 simonb wi->wi_ino = ino;
2009 1.2 simonb wi->wi_mode = mode;
2010 1.2 simonb wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2011 1.2 simonb LIST_INSERT_HEAD(wih, wi, wi_hash);
2012 1.2 simonb wl->wl_inohashcnt++;
2013 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_INODE,
2014 1.2 simonb ("wapbl_register_inode: ino=%"PRId64"\n", ino));
2015 1.2 simonb mutex_exit(&wl->wl_mtx);
2016 1.2 simonb } else {
2017 1.2 simonb mutex_exit(&wl->wl_mtx);
2018 1.2 simonb pool_put(&wapbl_ino_pool, wi);
2019 1.2 simonb }
2020 1.2 simonb }
2021 1.2 simonb
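/*
 * wapbl_unregister_inode(wl, ino, mode)
 *
 * Remove ino from the in-memory inode hash if it is registered;
 * it will no longer be included in the inode records written at
 * commit time.
 */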
2022 1.2 simonb void
2023 1.2 simonb wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2024 1.2 simonb {
2025 1.2 simonb struct wapbl_ino *wi;
2026 1.2 simonb
2027 1.2 simonb mutex_enter(&wl->wl_mtx);
2028 1.2 simonb wi = wapbl_inodetrk_get(wl, ino);
2029 1.2 simonb if (wi) {
2030 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_INODE,
2031 1.2 simonb ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
2032 1.2 simonb KASSERT(wl->wl_inohashcnt > 0);
2033 1.2 simonb wl->wl_inohashcnt--;
2034 1.2 simonb LIST_REMOVE(wi, wi_hash);
2035 1.2 simonb mutex_exit(&wl->wl_mtx);
2036 1.2 simonb
2037 1.2 simonb pool_put(&wapbl_ino_pool, wi);
2038 1.2 simonb } else {
2039 1.2 simonb mutex_exit(&wl->wl_mtx);
2040 1.2 simonb }
2041 1.2 simonb }
2042 1.2 simonb
2043 1.2 simonb /****************************************************************/
2044 1.2 simonb
2045 1.71 riastrad /*
2046 1.71 riastrad * wapbl_transaction_inodes_len(wl)
2047 1.71 riastrad *
2048 1.71 riastrad * Calculate the number of bytes required for inode registration
2049 1.71 riastrad * log records in wl.
2050 1.71 riastrad */
2051 1.30 uebayasi static inline size_t
2052 1.2 simonb wapbl_transaction_inodes_len(struct wapbl *wl)
2053 1.2 simonb {
2054 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2055 1.2 simonb int iph;
2056 1.2 simonb
2057 1.2 simonb 	/* Calculate number of inodes described in an inodelist header */
2058 1.2 simonb iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2059 1.2 simonb sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2060 1.2 simonb
2061 1.2 simonb KASSERT(iph > 0);
2062 1.2 simonb
2063 1.39 christos return MAX(1, howmany(wl->wl_inohashcnt, iph)) * blocklen;
2064 1.2 simonb }
2065 1.2 simonb
2066 1.2 simonb
2067 1.71 riastrad /*
2068 1.71 riastrad * wapbl_transaction_len(wl)
2069 1.71 riastrad *
2070 1.71 riastrad * Calculate number of bytes required for all log records in wl.
2071 1.71 riastrad */
2072 1.2 simonb static size_t
2073 1.2 simonb wapbl_transaction_len(struct wapbl *wl)
2074 1.2 simonb {
2075 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2076 1.2 simonb size_t len;
2077 1.2 simonb
2078 1.80 jdolecek 	/* Data bytes, plus blocklist, revocation, and inode records */
2079 1.2 simonb len = wl->wl_bcount;
2080 1.79 jdolecek len += howmany(wl->wl_bufcount, wl->wl_brperjblock) * blocklen;
2081 1.79 jdolecek len += howmany(wl->wl_dealloccnt, wl->wl_brperjblock) * blocklen;
2082 1.2 simonb len += wapbl_transaction_inodes_len(wl);
2083 1.2 simonb
2084 1.2 simonb return len;
2085 1.2 simonb }
2086 1.2 simonb
2087 1.2 simonb /*
2088 1.71 riastrad * wapbl_cache_sync(wl, msg)
2089 1.71 riastrad *
2090 1.71 riastrad * Issue DIOCCACHESYNC to wl->wl_devvp.
2091 1.71 riastrad *
2092 1.71 riastrad * If sysctl(vfs.wapbl.verbose_commit) >= 2, print a message
2093 1.71 riastrad * including msg about the duration of the cache sync.
2094 1.48 yamt */
2095 1.48 yamt static int
2096 1.48 yamt wapbl_cache_sync(struct wapbl *wl, const char *msg)
2097 1.48 yamt {
2098 1.48 yamt const bool verbose = wapbl_verbose_commit >= 2;
2099 1.48 yamt struct bintime start_time;
2100 1.48 yamt int force = 1;
2101 1.48 yamt int error;
2102 1.48 yamt
2103 1.48 yamt if (!wapbl_flush_disk_cache) {
2104 1.48 yamt return 0;
2105 1.48 yamt }
2106 1.48 yamt if (verbose) {
2107 1.48 yamt bintime(&start_time);
2108 1.48 yamt }
2109 1.48 yamt error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force,
2110 1.48 yamt FWRITE, FSCRED);
2111 1.48 yamt if (error) {
2112 1.48 yamt WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2113 1.76 riastrad ("wapbl_cache_sync: DIOCCACHESYNC on dev 0x%jx "
2114 1.76 riastrad "returned %d\n", (uintmax_t)wl->wl_devvp->v_rdev, error));
2115 1.48 yamt }
2116 1.48 yamt if (verbose) {
2117 1.48 yamt struct bintime d;
2118 1.48 yamt struct timespec ts;
2119 1.48 yamt
2120 1.48 yamt bintime(&d);
2121 1.48 yamt bintime_sub(&d, &start_time);
2122 1.48 yamt bintime2timespec(&d, &ts);
2123 1.48 yamt printf("wapbl_cache_sync: %s: dev 0x%jx %ju.%09lu\n",
2124 1.48 yamt msg, (uintmax_t)wl->wl_devvp->v_rdev,
2125 1.48 yamt (uintmax_t)ts.tv_sec, ts.tv_nsec);
2126 1.48 yamt }
2127 1.48 yamt return error;
2128 1.48 yamt }
2129 1.48 yamt
2130 1.48 yamt /*
2131 1.71 riastrad * wapbl_write_commit(wl, head, tail)
2132 1.71 riastrad *
2133 1.71 riastrad * Issue a disk cache sync to wait for all pending writes to the
2134 1.71 riastrad * log to complete, and then synchronously commit the current
2135 1.71 riastrad * circular queue head and tail to the log, in the next of two
2136 1.71 riastrad * locations for commit headers on disk.
2137 1.2 simonb *
2138 1.71 riastrad * Increment the generation number. If the generation number
2139 1.71 riastrad * rolls over to zero, then a subsequent commit would appear to
2140 1.71 riastrad * have an older generation than this one -- in that case, issue a
2141 1.71 riastrad * duplicate commit to avoid this.
2142 1.71 riastrad *
2143 1.71 riastrad * => Caller must have exclusive access to wl, either by holding
2144 1.71 riastrad * wl->wl_rwlock for writer or by being wapbl_start before anyone
2145 1.71 riastrad * else has seen wl.
2146 1.2 simonb */
2147 1.2 simonb static int
2148 1.2 simonb wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
2149 1.2 simonb {
2150 1.2 simonb struct wapbl_wc_header *wc = wl->wl_wc_header;
2151 1.2 simonb struct timespec ts;
2152 1.2 simonb int error;
2153 1.34 mlelstv daddr_t pbn;
2154 1.2 simonb
2155 1.54 hannken error = wapbl_buffered_flush(wl);
2156 1.54 hannken if (error)
2157 1.54 hannken return error;
2158 1.49 yamt /*
2159 1.49 yamt 	 * Flush disk cache to ensure that the blocks we've written are
2160 1.49 yamt 	 * actually written to stable storage before the commit header.
2161 1.49 yamt 	 *
2162 1.49 yamt 	 * XXX Calculate a checksum here; for now we rely on the cache sync.
2163 1.49 yamt */
2164 1.48 yamt wapbl_cache_sync(wl, "1");
2165 1.2 simonb
2166 1.2 simonb wc->wc_head = head;
2167 1.2 simonb wc->wc_tail = tail;
2168 1.2 simonb wc->wc_checksum = 0;
2169 1.2 simonb wc->wc_version = 1;
2170 1.2 simonb getnanotime(&ts);
2171 1.17 yamt wc->wc_time = ts.tv_sec;
2172 1.2 simonb wc->wc_timensec = ts.tv_nsec;
2173 1.2 simonb
2174 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2175 1.2 simonb ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n",
2176 1.2 simonb (intmax_t)head, (intmax_t)tail));
2177 1.2 simonb
2178 1.2 simonb /*
2179 1.49 yamt 	 * Write the commit header.
2180 1.49 yamt 	 *
2181 1.2 simonb 	 * XXX If the generation will roll over, first zero out the
2182 1.2 simonb 	 * second commit header before trying to write both headers.
2183 1.2 simonb */
2184 1.2 simonb
2185 1.34 mlelstv pbn = wl->wl_logpbn + (wc->wc_generation % 2);
2186 1.34 mlelstv #ifdef _KERNEL
2187 1.34 mlelstv pbn = btodb(pbn << wc->wc_log_dev_bshift);
2188 1.34 mlelstv #endif
2189 1.54 hannken error = wapbl_buffered_write(wc, wc->wc_len, wl, pbn);
2190 1.54 hannken if (error)
2191 1.54 hannken return error;
2192 1.54 hannken error = wapbl_buffered_flush(wl);
2193 1.2 simonb if (error)
2194 1.2 simonb return error;
2195 1.2 simonb
2196 1.49 yamt /*
2197 1.49 yamt 	 * Flush disk cache to ensure that the commit header is actually
2198 1.49 yamt 	 * written before the metadata blocks.
2199 1.49 yamt */
2200 1.48 yamt wapbl_cache_sync(wl, "2");
2201 1.2 simonb
2202 1.2 simonb /*
2203 1.2 simonb * If the generation number was zero, write it out a second time.
2204 1.2 simonb 	 * This handles initialization and generation number rollover.
2205 1.2 simonb */
2206 1.2 simonb if (wc->wc_generation++ == 0) {
2207 1.2 simonb error = wapbl_write_commit(wl, head, tail);
2208 1.2 simonb /*
2209 1.2 simonb * This panic should be able to be removed if we do the
2210 1.2 simonb 		 * zeroing mentioned above, and we are certain to roll
2211 1.2 simonb 		 * back the generation number on failure.
2212 1.2 simonb */
2213 1.2 simonb if (error)
2214 1.2 simonb panic("wapbl_write_commit: error writing duplicate "
2215 1.66 riastrad "log header: %d", error);
2216 1.2 simonb }
2217 1.2 simonb return 0;
2218 1.2 simonb }
2219 1.2 simonb
2220 1.71 riastrad /*
2221 1.71 riastrad * wapbl_write_blocks(wl, offp)
2222 1.71 riastrad *
2223 1.71 riastrad * Write all pending physical blocks in the current transaction
2224 1.71 riastrad * from wapbl_add_buf to the log on disk, adding to the circular
2225 1.71 riastrad * queue head at byte offset *offp, and returning the new head's
2226 1.71 riastrad * byte offset in *offp.
2227 1.71 riastrad */
2228 1.2 simonb static int
2229 1.2 simonb wapbl_write_blocks(struct wapbl *wl, off_t *offp)
2230 1.2 simonb {
2231 1.2 simonb struct wapbl_wc_blocklist *wc =
2232 1.2 simonb (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2233 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2234 1.2 simonb struct buf *bp;
2235 1.2 simonb off_t off = *offp;
2236 1.2 simonb int error;
2237 1.7 joerg size_t padding;
2238 1.2 simonb
2239 1.2 simonb KASSERT(rw_write_held(&wl->wl_rwlock));
2240 1.2 simonb
2241 1.2 simonb bp = LIST_FIRST(&wl->wl_bufs);
2242 1.2 simonb
2243 1.2 simonb while (bp) {
2244 1.2 simonb int cnt;
2245 1.2 simonb struct buf *obp = bp;
2246 1.2 simonb
2247 1.2 simonb KASSERT(bp->b_flags & B_LOCKED);
2248 1.2 simonb
2249 1.2 simonb wc->wc_type = WAPBL_WC_BLOCKS;
2250 1.2 simonb wc->wc_len = blocklen;
2251 1.2 simonb wc->wc_blkcount = 0;
2252 1.79 jdolecek while (bp && (wc->wc_blkcount < wl->wl_brperjblock)) {
2253 1.2 simonb /*
2254 1.2 simonb * Make sure all the physical block numbers are up to
2255 1.2 simonb * date. If this is not always true on a given
2256 1.2 simonb * filesystem, then VOP_BMAP must be called. We
2257 1.2 simonb * could call VOP_BMAP here, or else in the filesystem
2258 1.2 simonb * specific flush callback, although neither of those
2259 1.2 simonb 			 * solutions allows us to take the vnode lock.  If a
2260 1.2 simonb * filesystem requires that we must take the vnode lock
2261 1.2 simonb * to call VOP_BMAP, then we can probably do it in
2262 1.2 simonb * bwrite when the vnode lock should already be held
2263 1.2 simonb * by the invoking code.
2264 1.2 simonb */
2265 1.2 simonb KASSERT((bp->b_vp->v_type == VBLK) ||
2266 1.2 simonb (bp->b_blkno != bp->b_lblkno));
2267 1.2 simonb KASSERT(bp->b_blkno > 0);
2268 1.2 simonb
2269 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
2270 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
2271 1.2 simonb wc->wc_len += bp->b_bcount;
2272 1.2 simonb wc->wc_blkcount++;
2273 1.2 simonb bp = LIST_NEXT(bp, b_wapbllist);
2274 1.2 simonb }
2275 1.7 joerg if (wc->wc_len % blocklen != 0) {
2276 1.7 joerg padding = blocklen - wc->wc_len % blocklen;
2277 1.7 joerg wc->wc_len += padding;
2278 1.7 joerg } else {
2279 1.7 joerg padding = 0;
2280 1.7 joerg }
2281 1.7 joerg
2282 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2283 1.7 joerg ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
2284 1.7 joerg wc->wc_len, padding, (intmax_t)off));
2285 1.2 simonb
2286 1.2 simonb error = wapbl_circ_write(wl, wc, blocklen, &off);
2287 1.2 simonb if (error)
2288 1.2 simonb return error;
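		/*
		 * Now write the data of the blocks just described,
		 * starting again from the first buffer of this batch.
		 */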
2289 1.2 simonb bp = obp;
2290 1.2 simonb cnt = 0;
2291 1.79 jdolecek while (bp && (cnt++ < wl->wl_brperjblock)) {
2292 1.2 simonb error = wapbl_circ_write(wl, bp->b_data,
2293 1.2 simonb bp->b_bcount, &off);
2294 1.2 simonb if (error)
2295 1.2 simonb return error;
2296 1.2 simonb bp = LIST_NEXT(bp, b_wapbllist);
2297 1.2 simonb }
2298 1.7 joerg if (padding) {
2299 1.7 joerg void *zero;
2300 1.7 joerg
2301 1.51 para zero = wapbl_alloc(padding);
2302 1.7 joerg memset(zero, 0, padding);
2303 1.7 joerg error = wapbl_circ_write(wl, zero, padding, &off);
2304 1.18 yamt wapbl_free(zero, padding);
2305 1.7 joerg if (error)
2306 1.7 joerg return error;
2307 1.7 joerg }
2308 1.2 simonb }
2309 1.2 simonb *offp = off;
2310 1.2 simonb return 0;
2311 1.2 simonb }
2312 1.2 simonb
2313 1.71 riastrad /*
2314 1.71 riastrad * wapbl_write_revocations(wl, offp)
2315 1.71 riastrad *
2316 1.71 riastrad * Write all pending deallocations in the current transaction from
2317 1.71 riastrad * wapbl_register_deallocation to the log on disk, adding to the
2318 1.71 riastrad * circular queue's head at byte offset *offp, and returning the
2319 1.71 riastrad * new head's byte offset in *offp.
2320 1.71 riastrad */
2321 1.2 simonb static int
2322 1.2 simonb wapbl_write_revocations(struct wapbl *wl, off_t *offp)
2323 1.2 simonb {
2324 1.2 simonb struct wapbl_wc_blocklist *wc =
2325 1.2 simonb (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2326 1.2 simonb int i;
2327 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2328 1.2 simonb off_t off = *offp;
2329 1.2 simonb int error;
2330 1.2 simonb
2331 1.2 simonb if (wl->wl_dealloccnt == 0)
2332 1.2 simonb return 0;
2333 1.2 simonb
2334 1.2 simonb i = 0;
2335 1.2 simonb while (i < wl->wl_dealloccnt) {
2336 1.2 simonb wc->wc_type = WAPBL_WC_REVOCATIONS;
2337 1.2 simonb wc->wc_len = blocklen;
2338 1.2 simonb wc->wc_blkcount = 0;
2339 1.79 jdolecek 		while ((i < wl->wl_dealloccnt) &&
		    (wc->wc_blkcount < wl->wl_brperjblock)) {
2340 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2341 1.2 simonb wl->wl_deallocblks[i];
2342 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2343 1.2 simonb wl->wl_dealloclens[i];
2344 1.2 simonb wc->wc_blkcount++;
2345 1.2 simonb i++;
2346 1.2 simonb }
2347 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2348 1.2 simonb ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2349 1.2 simonb wc->wc_len, (intmax_t)off));
2350 1.2 simonb error = wapbl_circ_write(wl, wc, blocklen, &off);
2351 1.2 simonb if (error)
2352 1.2 simonb return error;
2353 1.2 simonb }
2354 1.2 simonb *offp = off;
2355 1.2 simonb return 0;
2356 1.2 simonb }
2357 1.2 simonb
2358 1.71 riastrad /*
2359 1.71 riastrad * wapbl_write_inodes(wl, offp)
2360 1.71 riastrad *
2361 1.71 riastrad * Write all pending inode allocations in the current transaction
2362 1.71 riastrad * from wapbl_register_inode to the log on disk, adding to the
2363 1.71 riastrad * circular queue's head at byte offset *offp and returning the
2364 1.71 riastrad * new head's byte offset in *offp.
2365 1.71 riastrad */
2366 1.2 simonb static int
2367 1.2 simonb wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2368 1.2 simonb {
2369 1.2 simonb struct wapbl_wc_inodelist *wc =
2370 1.2 simonb (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2371 1.2 simonb int i;
2372 1.14 joerg int blocklen = 1 << wl->wl_log_dev_bshift;
2373 1.2 simonb off_t off = *offp;
2374 1.2 simonb int error;
2375 1.2 simonb
2376 1.2 simonb struct wapbl_ino_head *wih;
2377 1.2 simonb struct wapbl_ino *wi;
2378 1.2 simonb int iph;
2379 1.2 simonb
2380 1.2 simonb iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2381 1.2 simonb sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2382 1.2 simonb
2383 1.2 simonb i = 0;
2384 1.2 simonb wih = &wl->wl_inohash[0];
2385 1.2 simonb wi = 0;
2386 1.2 simonb do {
2387 1.2 simonb wc->wc_type = WAPBL_WC_INODES;
2388 1.2 simonb wc->wc_len = blocklen;
2389 1.2 simonb wc->wc_inocnt = 0;
2390 1.2 simonb wc->wc_clear = (i == 0);
2391 1.2 simonb while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2392 1.2 simonb while (!wi) {
2393 1.2 simonb KASSERT((wih - &wl->wl_inohash[0])
2394 1.2 simonb <= wl->wl_inohashmask);
2395 1.2 simonb wi = LIST_FIRST(wih++);
2396 1.2 simonb }
2397 1.2 simonb wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2398 1.2 simonb wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2399 1.2 simonb wc->wc_inocnt++;
2400 1.2 simonb i++;
2401 1.2 simonb wi = LIST_NEXT(wi, wi_hash);
2402 1.2 simonb }
2403 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2404 1.2 simonb ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2405 1.2 simonb wc->wc_len, (intmax_t)off));
2406 1.2 simonb error = wapbl_circ_write(wl, wc, blocklen, &off);
2407 1.2 simonb if (error)
2408 1.2 simonb return error;
2409 1.2 simonb } while (i < wl->wl_inohashcnt);
2410 1.2 simonb
2411 1.2 simonb *offp = off;
2412 1.2 simonb return 0;
2413 1.2 simonb }
2414 1.2 simonb
2415 1.2 simonb #endif /* _KERNEL */
2416 1.2 simonb
2417 1.2 simonb /****************************************************************/
2418 1.2 simonb
2419 1.2 simonb struct wapbl_blk {
2420 1.2 simonb LIST_ENTRY(wapbl_blk) wb_hash;
2421 1.2 simonb daddr_t wb_blk;
2422 1.2 simonb off_t wb_off; /* Offset of this block in the log */
2423 1.2 simonb };
2424 1.2 simonb #define WAPBL_BLKPOOL_MIN 83
2425 1.2 simonb
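/*
 * During replay we build a hash table keyed by physical block
 * number, mapping each block that appears in the log to the log
 * offset of its most recent copy, so that only the newest image of
 * each block is written back to the file system.
 */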
2426 1.2 simonb static void
2427 1.2 simonb wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2428 1.2 simonb {
2429 1.2 simonb if (size < WAPBL_BLKPOOL_MIN)
2430 1.2 simonb size = WAPBL_BLKPOOL_MIN;
2431 1.2 simonb KASSERT(wr->wr_blkhash == 0);
2432 1.2 simonb #ifdef _KERNEL
2433 1.2 simonb wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2434 1.2 simonb #else /* ! _KERNEL */
2435 1.2 simonb /* Manually implement hashinit */
2436 1.2 simonb {
2437 1.25 lukem unsigned long i, hashsize;
2438 1.2 simonb for (hashsize = 1; hashsize < size; hashsize <<= 1)
2439 1.2 simonb continue;
2440 1.51 para wr->wr_blkhash = wapbl_alloc(hashsize * sizeof(*wr->wr_blkhash));
2441 1.37 drochner for (i = 0; i < hashsize; i++)
2442 1.2 simonb LIST_INIT(&wr->wr_blkhash[i]);
2443 1.2 simonb wr->wr_blkhashmask = hashsize - 1;
2444 1.2 simonb }
2445 1.2 simonb #endif /* ! _KERNEL */
2446 1.2 simonb }
2447 1.2 simonb
2448 1.2 simonb static void
2449 1.2 simonb wapbl_blkhash_free(struct wapbl_replay *wr)
2450 1.2 simonb {
2451 1.2 simonb KASSERT(wr->wr_blkhashcnt == 0);
2452 1.2 simonb #ifdef _KERNEL
2453 1.2 simonb hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2454 1.2 simonb #else /* ! _KERNEL */
2455 1.18 yamt wapbl_free(wr->wr_blkhash,
2456 1.18 yamt (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
2457 1.2 simonb #endif /* ! _KERNEL */
2458 1.2 simonb }
2459 1.2 simonb
2460 1.2 simonb static struct wapbl_blk *
2461 1.2 simonb wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2462 1.2 simonb {
2463 1.2 simonb struct wapbl_blk_head *wbh;
2464 1.2 simonb struct wapbl_blk *wb;
2465 1.2 simonb wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2466 1.2 simonb LIST_FOREACH(wb, wbh, wb_hash) {
2467 1.2 simonb if (blk == wb->wb_blk)
2468 1.2 simonb return wb;
2469 1.2 simonb }
2470 1.2 simonb return 0;
2471 1.2 simonb }
2472 1.2 simonb
2473 1.2 simonb static void
2474 1.2 simonb wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2475 1.2 simonb {
2476 1.2 simonb struct wapbl_blk_head *wbh;
2477 1.2 simonb struct wapbl_blk *wb;
2478 1.2 simonb wb = wapbl_blkhash_get(wr, blk);
2479 1.2 simonb if (wb) {
2480 1.2 simonb KASSERT(wb->wb_blk == blk);
2481 1.2 simonb wb->wb_off = off;
2482 1.2 simonb } else {
2483 1.51 para wb = wapbl_alloc(sizeof(*wb));
2484 1.2 simonb wb->wb_blk = blk;
2485 1.2 simonb wb->wb_off = off;
2486 1.2 simonb wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2487 1.2 simonb LIST_INSERT_HEAD(wbh, wb, wb_hash);
2488 1.2 simonb wr->wr_blkhashcnt++;
2489 1.2 simonb }
2490 1.2 simonb }
2491 1.2 simonb
2492 1.2 simonb static void
2493 1.2 simonb wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2494 1.2 simonb {
2495 1.2 simonb struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2496 1.2 simonb if (wb) {
2497 1.2 simonb KASSERT(wr->wr_blkhashcnt > 0);
2498 1.2 simonb wr->wr_blkhashcnt--;
2499 1.2 simonb LIST_REMOVE(wb, wb_hash);
2500 1.18 yamt wapbl_free(wb, sizeof(*wb));
2501 1.2 simonb }
2502 1.2 simonb }
2503 1.2 simonb
2504 1.2 simonb static void
2505 1.2 simonb wapbl_blkhash_clear(struct wapbl_replay *wr)
2506 1.2 simonb {
2507 1.25 lukem unsigned long i;
2508 1.2 simonb for (i = 0; i <= wr->wr_blkhashmask; i++) {
2509 1.2 simonb struct wapbl_blk *wb;
2510 1.2 simonb
2511 1.2 simonb while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2512 1.2 simonb KASSERT(wr->wr_blkhashcnt > 0);
2513 1.2 simonb wr->wr_blkhashcnt--;
2514 1.2 simonb LIST_REMOVE(wb, wb_hash);
2515 1.18 yamt wapbl_free(wb, sizeof(*wb));
2516 1.2 simonb }
2517 1.2 simonb }
2518 1.2 simonb KASSERT(wr->wr_blkhashcnt == 0);
2519 1.2 simonb }
2520 1.2 simonb
2521 1.2 simonb /****************************************************************/
2522 1.2 simonb
2523 1.71 riastrad /*
2524 1.71 riastrad * wapbl_circ_read(wr, data, len, offp)
2525 1.71 riastrad *
2526 1.71 riastrad * Read len bytes into data from the circular queue of wr,
2527 1.71 riastrad * starting at the linear byte offset *offp, and returning the new
2528 1.71 riastrad * linear byte offset in *offp.
2529 1.71 riastrad *
2530 1.71 riastrad * If the starting linear byte offset precedes wr->wr_circ_off,
2531 1.71 riastrad * the read instead begins at wr->wr_circ_off. XXX WTF? This
2532 1.71 riastrad * should be a KASSERT, not a conditional.
2533 1.71 riastrad */
2534 1.2 simonb static int
2535 1.2 simonb wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2536 1.2 simonb {
2537 1.2 simonb size_t slen;
2538 1.2 simonb off_t off = *offp;
2539 1.2 simonb int error;
2540 1.34 mlelstv daddr_t pbn;
2541 1.2 simonb
2542 1.14 joerg KASSERT(((len >> wr->wr_log_dev_bshift) <<
2543 1.14 joerg wr->wr_log_dev_bshift) == len);
2544 1.34 mlelstv
2545 1.14 joerg if (off < wr->wr_circ_off)
2546 1.14 joerg off = wr->wr_circ_off;
2547 1.14 joerg slen = wr->wr_circ_off + wr->wr_circ_size - off;
2548 1.2 simonb if (slen < len) {
2549 1.34 mlelstv pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2550 1.34 mlelstv #ifdef _KERNEL
2551 1.34 mlelstv pbn = btodb(pbn << wr->wr_log_dev_bshift);
2552 1.34 mlelstv #endif
2553 1.34 mlelstv error = wapbl_read(data, slen, wr->wr_devvp, pbn);
2554 1.2 simonb if (error)
2555 1.2 simonb return error;
2556 1.2 simonb data = (uint8_t *)data + slen;
2557 1.2 simonb len -= slen;
2558 1.14 joerg off = wr->wr_circ_off;
2559 1.2 simonb }
2560 1.34 mlelstv pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2561 1.34 mlelstv #ifdef _KERNEL
2562 1.34 mlelstv pbn = btodb(pbn << wr->wr_log_dev_bshift);
2563 1.34 mlelstv #endif
2564 1.34 mlelstv error = wapbl_read(data, len, wr->wr_devvp, pbn);
2565 1.2 simonb if (error)
2566 1.2 simonb return error;
2567 1.2 simonb off += len;
2568 1.14 joerg if (off >= wr->wr_circ_off + wr->wr_circ_size)
2569 1.14 joerg off = wr->wr_circ_off;
2570 1.2 simonb *offp = off;
2571 1.2 simonb return 0;
2572 1.2 simonb }
2573 1.2 simonb
2574 1.71 riastrad /*
2575 1.71 riastrad * wapbl_circ_advance(wr, len, offp)
2576 1.71 riastrad *
2577 1.71 riastrad * Compute the linear byte offset of the circular queue of wr that
2578 1.71 riastrad * is len bytes past *offp, and store it in *offp.
2579 1.71 riastrad *
2580 1.71 riastrad * This is as if wapbl_circ_read, but without actually reading
2581 1.71 riastrad * anything.
2582 1.71 riastrad *
2583 1.71 riastrad * If the starting linear byte offset precedes wr->wr_circ_off, it
2584 1.71 riastrad * is taken to be wr->wr_circ_off instead. XXX WTF? This should
2585 1.71 riastrad * be a KASSERT, not a conditional.
2586 1.71 riastrad */
2587 1.2 simonb static void
2588 1.2 simonb wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2589 1.2 simonb {
2590 1.2 simonb size_t slen;
2591 1.2 simonb off_t off = *offp;
2592 1.2 simonb
2593 1.14 joerg KASSERT(((len >> wr->wr_log_dev_bshift) <<
2594 1.14 joerg wr->wr_log_dev_bshift) == len);
2595 1.2 simonb
2596 1.14 joerg if (off < wr->wr_circ_off)
2597 1.14 joerg off = wr->wr_circ_off;
2598 1.14 joerg slen = wr->wr_circ_off + wr->wr_circ_size - off;
2599 1.2 simonb if (slen < len) {
2600 1.2 simonb len -= slen;
2601 1.14 joerg off = wr->wr_circ_off;
2602 1.2 simonb }
2603 1.2 simonb off += len;
2604 1.14 joerg if (off >= wr->wr_circ_off + wr->wr_circ_size)
2605 1.14 joerg off = wr->wr_circ_off;
2606 1.2 simonb *offp = off;
2607 1.2 simonb }
2608 1.2 simonb
2609 1.2 simonb /****************************************************************/
2610 1.2 simonb
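/*
 * wapbl_replay_start(wrp, vp, off, count, blksize)
 *
 * Open the journal that starts at block off of vp for replay: read
 * the two commit headers, choose the one with the newer generation,
 * and scan the log it describes to build the in-memory replay
 * state.  On success, store the new replay handle in *wrp.
 */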
2611 1.2 simonb int
2612 1.2 simonb wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2613 1.2 simonb daddr_t off, size_t count, size_t blksize)
2614 1.2 simonb {
2615 1.2 simonb struct wapbl_replay *wr;
2616 1.2 simonb int error;
2617 1.2 simonb struct vnode *devvp;
2618 1.2 simonb daddr_t logpbn;
2619 1.2 simonb uint8_t *scratch;
2620 1.2 simonb struct wapbl_wc_header *wch;
2621 1.2 simonb struct wapbl_wc_header *wch2;
2622 1.2 simonb /* Use this until we read the actual log header */
2623 1.31 mlelstv int log_dev_bshift = ilog2(blksize);
2624 1.2 simonb size_t used;
2625 1.34 mlelstv daddr_t pbn;
2626 1.2 simonb
2627 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2628 1.2 simonb ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2629 1.2 simonb vp, off, count, blksize));
2630 1.2 simonb
2631 1.2 simonb if (off < 0)
2632 1.2 simonb return EINVAL;
2633 1.2 simonb
2634 1.2 simonb if (blksize < DEV_BSIZE)
2635 1.2 simonb return EINVAL;
2636 1.2 simonb if (blksize % DEV_BSIZE)
2637 1.2 simonb return EINVAL;
2638 1.2 simonb
2639 1.2 simonb #ifdef _KERNEL
2640 1.2 simonb #if 0
2641 1.2 simonb /* XXX vp->v_size isn't reliably set for VBLK devices,
2642 1.2 simonb * especially root. However, we might still want to verify
2643 1.2 simonb * that the full load is readable */
2644 1.2 simonb if ((off + count) * blksize > vp->v_size)
2645 1.2 simonb return EINVAL;
2646 1.2 simonb #endif
2647 1.2 simonb if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2648 1.2 simonb return error;
2649 1.2 simonb }
2650 1.2 simonb #else /* ! _KERNEL */
2651 1.2 simonb devvp = vp;
2652 1.2 simonb logpbn = off;
2653 1.2 simonb #endif /* ! _KERNEL */
2654 1.2 simonb
2655 1.51 para scratch = wapbl_alloc(MAXBSIZE);
2656 1.2 simonb
2657 1.34 mlelstv pbn = logpbn;
2658 1.34 mlelstv #ifdef _KERNEL
2659 1.34 mlelstv pbn = btodb(pbn << log_dev_bshift);
2660 1.34 mlelstv #endif
2661 1.34 mlelstv error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, pbn);
2662 1.2 simonb if (error)
2663 1.2 simonb goto errout;
2664 1.2 simonb
2665 1.2 simonb wch = (struct wapbl_wc_header *)scratch;
2666 1.2 simonb wch2 =
2667 1.2 simonb (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2668 1.2 simonb /* XXX verify checksums and magic numbers */
2669 1.2 simonb if (wch->wc_type != WAPBL_WC_HEADER) {
2670 1.2 simonb printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2671 1.2 simonb error = EFTYPE;
2672 1.2 simonb goto errout;
2673 1.2 simonb }
2674 1.2 simonb
2675 1.2 simonb if (wch2->wc_generation > wch->wc_generation)
2676 1.2 simonb wch = wch2;
2677 1.2 simonb
2678 1.2 simonb wr = wapbl_calloc(1, sizeof(*wr));
2679 1.2 simonb
2680 1.2 simonb wr->wr_logvp = vp;
2681 1.2 simonb wr->wr_devvp = devvp;
2682 1.2 simonb wr->wr_logpbn = logpbn;
2683 1.2 simonb
2684 1.2 simonb wr->wr_scratch = scratch;
2685 1.2 simonb
2686 1.14 joerg wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
2687 1.14 joerg wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
2688 1.14 joerg wr->wr_circ_off = wch->wc_circ_off;
2689 1.14 joerg wr->wr_circ_size = wch->wc_circ_size;
2690 1.14 joerg wr->wr_generation = wch->wc_generation;
2691 1.2 simonb
2692 1.2 simonb used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2693 1.2 simonb
2694 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2695 1.2 simonb ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2696 1.2 simonb " len=%"PRId64" used=%zu\n",
2697 1.2 simonb wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2698 1.2 simonb wch->wc_circ_size, used));
2699 1.2 simonb
2700 1.2 simonb wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2701 1.11 joerg
2702 1.14 joerg error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
2703 1.2 simonb if (error) {
2704 1.2 simonb wapbl_replay_stop(wr);
2705 1.2 simonb wapbl_replay_free(wr);
2706 1.2 simonb return error;
2707 1.2 simonb }
2708 1.2 simonb
2709 1.2 simonb *wrp = wr;
2710 1.2 simonb return 0;
2711 1.2 simonb
2712 1.2 simonb errout:
2713 1.18 yamt wapbl_free(scratch, MAXBSIZE);
2714 1.2 simonb return error;
2715 1.2 simonb }
2716 1.2 simonb
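/*
 * wapbl_replay_stop(wr)
 *
 * Close an open replay handle: release the scratch buffer and the
 * block hash.  The handle itself remains until wapbl_replay_free().
 */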
2717 1.2 simonb void
2718 1.2 simonb wapbl_replay_stop(struct wapbl_replay *wr)
2719 1.2 simonb {
2720 1.2 simonb
2721 1.4 joerg if (!wapbl_replay_isopen(wr))
2722 1.4 joerg return;
2723 1.4 joerg
2724 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2725 1.2 simonb
2726 1.18 yamt wapbl_free(wr->wr_scratch, MAXBSIZE);
2727 1.18 yamt wr->wr_scratch = NULL;
2728 1.2 simonb
2729 1.18 yamt wr->wr_logvp = NULL;
2730 1.2 simonb
2731 1.2 simonb wapbl_blkhash_clear(wr);
2732 1.2 simonb wapbl_blkhash_free(wr);
2733 1.2 simonb }
2734 1.2 simonb
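/*
 * wapbl_replay_free(wr)
 *
 * Free a replay handle that has already been stopped, including
 * any inode records gathered during replay.
 */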
2735 1.2 simonb void
2736 1.2 simonb wapbl_replay_free(struct wapbl_replay *wr)
2737 1.2 simonb {
2738 1.2 simonb
2739 1.2 simonb KDASSERT(!wapbl_replay_isopen(wr));
2740 1.2 simonb
2741 1.2 simonb if (wr->wr_inodes)
2742 1.18 yamt wapbl_free(wr->wr_inodes,
2743 1.18 yamt wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
2744 1.18 yamt wapbl_free(wr, sizeof(*wr));
2745 1.2 simonb }
2746 1.2 simonb
2747 1.4 joerg #ifdef _KERNEL
2748 1.2 simonb int
2749 1.2 simonb wapbl_replay_isopen1(struct wapbl_replay *wr)
2750 1.2 simonb {
2751 1.2 simonb
2752 1.2 simonb return wapbl_replay_isopen(wr);
2753 1.2 simonb }
2754 1.4 joerg #endif
2755 1.2 simonb
2756 1.62 mlelstv /*
2757 1.62 mlelstv  * Calculate the disk address for the i'th block in the wc_blocklist,
2758 1.62 mlelstv * offset by j blocks of size blen.
2759 1.62 mlelstv *
2760 1.62 mlelstv * wc_daddr is always a kernel disk address in DEV_BSIZE units that
2761 1.62 mlelstv * was written to the journal.
2762 1.62 mlelstv *
2763 1.62 mlelstv * The kernel needs that address plus the offset in DEV_BSIZE units.
2764 1.62 mlelstv *
2765 1.62 mlelstv * Userland needs that address plus the offset in blen units.
2766 1.62 mlelstv *
2767 1.62 mlelstv */
2768 1.62 mlelstv static daddr_t
2769 1.62 mlelstv wapbl_block_daddr(struct wapbl_wc_blocklist *wc, int i, int j, int blen)
2770 1.62 mlelstv {
2771 1.62 mlelstv daddr_t pbn;
2772 1.62 mlelstv
2773 1.62 mlelstv #ifdef _KERNEL
2774 1.62 mlelstv pbn = wc->wc_blocks[i].wc_daddr + btodb(j * blen);
2775 1.62 mlelstv #else
2776 1.62 mlelstv pbn = dbtob(wc->wc_blocks[i].wc_daddr) / blen + j;
2777 1.62 mlelstv #endif
2778 1.62 mlelstv
2779 1.62 mlelstv return pbn;
2780 1.62 mlelstv }
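The conversion above is easiest to see with concrete numbers. The stand-alone sketch below works one entry through both forms of the calculation; the entry values are made up for illustration, DEV_BSIZE is assumed to be 512 bytes as on NetBSD, and btodb()/dbtob() are spelled out as plain arithmetic so the example builds outside the kernel.

#include <stdio.h>

/*
 * Worked example (illustrative values only) of the two address forms
 * computed by wapbl_block_daddr().
 */
#define EX_DEV_BSIZE	512

int
main(void)
{
	long long wc_daddr = 1024;	/* journalled address, DEV_BSIZE units */
	long long blen = 16384;		/* fs block size in bytes */
	long long j = 2;		/* third block of the entry */

	/* kernel form: original address plus the offset in DEV_BSIZE units */
	long long kern = wc_daddr + (j * blen) / EX_DEV_BSIZE;

	/* userland form: address converted to blen-sized units, plus j */
	long long user = (wc_daddr * EX_DEV_BSIZE) / blen + j;

	/* prints: kernel pbn = 1088, userland pbn = 34 */
	printf("kernel pbn = %lld, userland pbn = %lld\n", kern, user);
	return 0;
}
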
2781 1.62 mlelstv
2782 1.10 joerg static void
2783 1.10 joerg wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
2784 1.10 joerg {
2785 1.10 joerg struct wapbl_wc_blocklist *wc =
2786 1.10 joerg (struct wapbl_wc_blocklist *)wr->wr_scratch;
2787 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
2788 1.10 joerg int i, j, n;
2789 1.10 joerg
2790 1.10 joerg for (i = 0; i < wc->wc_blkcount; i++) {
2791 1.10 joerg /*
2792 1.10 joerg * Enter each physical block into the hashtable independently.
2793 1.10 joerg */
2794 1.14 joerg n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2795 1.10 joerg for (j = 0; j < n; j++) {
2796 1.62 mlelstv 			wapbl_blkhash_ins(wr,
2797 1.62 mlelstv 			    wapbl_block_daddr(wc, i, j, fsblklen), *offp);
2798 1.10 joerg wapbl_circ_advance(wr, fsblklen, offp);
2799 1.10 joerg }
2800 1.10 joerg }
2801 1.10 joerg }
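A single blocklist entry can cover several file-system blocks worth of journal data; the loop above enters each of them into the hashtable separately, keyed by its own disk address and remembering its own offset within the circular log. Below is a minimal stand-alone sketch of that split, using made-up entry values and plain addition in place of wapbl_circ_advance().

#include <stdio.h>

/*
 * Minimal sketch (illustrative values, not kernel code) of how one
 * blocklist entry is broken into per-block hash insertions.
 */
int
main(void)
{
	int fs_dev_bshift = 14;			/* 16 KiB fs blocks */
	long long fsblklen = 1 << fs_dev_bshift;
	long long wc_daddr = 2048;		/* disk address, DEV_BSIZE units */
	long long wc_dlen = 3 * fsblklen;	/* bytes of data in this entry */
	long long off = 65536;			/* journal offset of the data */

	int n = (int)(wc_dlen >> fs_dev_bshift);	/* fs blocks, here 3 */
	for (int j = 0; j < n; j++) {
		long long pbn = wc_daddr + (j * fsblklen) / 512;
		printf("block %lld saved at journal offset %lld\n", pbn, off);
		off += fsblklen;	/* the kernel advances circularly */
	}
	return 0;
}
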
2802 1.10 joerg
2803 1.10 joerg static void
2804 1.10 joerg wapbl_replay_process_revocations(struct wapbl_replay *wr)
2805 1.10 joerg {
2806 1.10 joerg struct wapbl_wc_blocklist *wc =
2807 1.10 joerg (struct wapbl_wc_blocklist *)wr->wr_scratch;
2808 1.34 mlelstv int fsblklen = 1 << wr->wr_fs_dev_bshift;
2809 1.10 joerg int i, j, n;
2810 1.10 joerg
2811 1.10 joerg for (i = 0; i < wc->wc_blkcount; i++) {
2812 1.10 joerg /*
2813 1.10 joerg * Remove any blocks found from the hashtable.
2814 1.10 joerg */
2815 1.14 joerg n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2816 1.10 joerg for (j = 0; j < n; j++)
2817 1.62 mlelstv wapbl_blkhash_rem(wr, wapbl_block_daddr(wc, i, j, fsblklen));
2818 1.10 joerg }
2819 1.10 joerg }
2820 1.10 joerg
2821 1.10 joerg static void
2822 1.10 joerg wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
2823 1.10 joerg {
2824 1.10 joerg struct wapbl_wc_inodelist *wc =
2825 1.10 joerg (struct wapbl_wc_inodelist *)wr->wr_scratch;
2826 1.18 yamt void *new_inodes;
2827 1.18 yamt const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
2828 1.18 yamt
2829 1.18 yamt KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
2830 1.18 yamt
2831 1.10 joerg /*
2832 1.10 joerg 	 * Keep track of where we found this so that the location
2833 1.10 joerg 	 * won't be overwritten.
2834 1.10 joerg */
2835 1.10 joerg if (wc->wc_clear) {
2836 1.10 joerg wr->wr_inodestail = oldoff;
2837 1.10 joerg wr->wr_inodescnt = 0;
2838 1.12 joerg if (wr->wr_inodes != NULL) {
2839 1.18 yamt wapbl_free(wr->wr_inodes, oldsize);
2840 1.12 joerg wr->wr_inodes = NULL;
2841 1.12 joerg }
2842 1.10 joerg }
2843 1.10 joerg wr->wr_inodeshead = newoff;
2844 1.10 joerg if (wc->wc_inocnt == 0)
2845 1.10 joerg return;
2846 1.10 joerg
2847 1.51 para new_inodes = wapbl_alloc((wr->wr_inodescnt + wc->wc_inocnt) *
2848 1.18 yamt sizeof(wr->wr_inodes[0]));
2849 1.18 yamt if (wr->wr_inodes != NULL) {
2850 1.18 yamt memcpy(new_inodes, wr->wr_inodes, oldsize);
2851 1.18 yamt wapbl_free(wr->wr_inodes, oldsize);
2852 1.18 yamt }
2853 1.18 yamt wr->wr_inodes = new_inodes;
2854 1.10 joerg memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
2855 1.18 yamt wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
2856 1.10 joerg wr->wr_inodescnt += wc->wc_inocnt;
2857 1.10 joerg }
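There is no reallocation helper available here, so the accumulated inode list is grown by hand: allocate a buffer large enough for both the old and the new entries, copy the old ones across, free the old buffer, and append. A generic userland sketch of the same pattern follows; the entry type and function name are hypothetical.

#include <stdlib.h>
#include <string.h>

/*
 * Grow-copy-append sketch (illustrative only).  "entry_t" is a
 * hypothetical stand-in for the journalled inode record type.
 */
typedef struct { unsigned ino; unsigned mode; } entry_t;

int
append_entries(entry_t **arrayp, size_t *cntp, const entry_t *add, size_t nadd)
{
	entry_t *na = malloc((*cntp + nadd) * sizeof(*na));

	if (na == NULL)
		return -1;
	if (*arrayp != NULL) {
		memcpy(na, *arrayp, *cntp * sizeof(*na));
		free(*arrayp);
	}
	memcpy(na + *cntp, add, nadd * sizeof(*na));
	*arrayp = na;
	*cntp += nadd;
	return 0;
}
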
2858 1.10 joerg
2859 1.2 simonb static int
2860 1.14 joerg wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
2861 1.2 simonb {
2862 1.2 simonb off_t off;
2863 1.2 simonb int error;
2864 1.2 simonb
2865 1.14 joerg int logblklen = 1 << wr->wr_log_dev_bshift;
2866 1.2 simonb
2867 1.2 simonb wapbl_blkhash_clear(wr);
2868 1.2 simonb
2869 1.14 joerg off = tail;
2870 1.14 joerg while (off != head) {
2871 1.2 simonb struct wapbl_wc_null *wcn;
2872 1.2 simonb off_t saveoff = off;
2873 1.2 simonb error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2874 1.2 simonb if (error)
2875 1.2 simonb goto errout;
2876 1.2 simonb wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2877 1.2 simonb switch (wcn->wc_type) {
2878 1.2 simonb case WAPBL_WC_BLOCKS:
2879 1.10 joerg wapbl_replay_process_blocks(wr, &off);
2880 1.2 simonb break;
2881 1.2 simonb
2882 1.2 simonb case WAPBL_WC_REVOCATIONS:
2883 1.10 joerg wapbl_replay_process_revocations(wr);
2884 1.2 simonb break;
2885 1.2 simonb
2886 1.2 simonb case WAPBL_WC_INODES:
2887 1.10 joerg wapbl_replay_process_inodes(wr, saveoff, off);
2888 1.2 simonb break;
2889 1.10 joerg
2890 1.2 simonb default:
2891 1.2 simonb printf("Unrecognized wapbl type: 0x%08x\n",
2892 1.2 simonb wcn->wc_type);
2893 1.2 simonb error = EFTYPE;
2894 1.2 simonb goto errout;
2895 1.2 simonb }
2896 1.2 simonb wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2897 1.2 simonb if (off != saveoff) {
2898 1.2 simonb printf("wapbl_replay: corrupted records\n");
2899 1.2 simonb error = EFTYPE;
2900 1.2 simonb goto errout;
2901 1.2 simonb }
2902 1.2 simonb }
2903 1.2 simonb return 0;
2904 1.2 simonb
2905 1.2 simonb errout:
2906 1.2 simonb wapbl_blkhash_clear(wr);
2907 1.2 simonb return error;
2908 1.2 simonb }
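The walk depends on every record being self-describing: the common header's wc_len gives the total log space the record occupies, so once the type-specific processing has consumed its payload the running offset must sit exactly wc_len past the record's start, and any disagreement is treated as corruption. A simplified, non-circular sketch of that framing check follows; the record layout and the handler are hypothetical stand-ins.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* hypothetical record header: type plus total length in bytes */
struct rec_hdr {
	unsigned type;
	size_t len;		/* header + payload */
};

/* hypothetical per-type handler: returns the bytes it consumed */
static size_t
process_record(const struct rec_hdr *h, const unsigned char *payload)
{
	(void)payload;
	/* ... dispatch on h->type, as wapbl_replay_process() does ... */
	return h->len;
}

int
scan_records(const unsigned char *log, size_t tail, size_t head)
{
	size_t off = tail;

	while (off != head) {
		struct rec_hdr h;
		size_t saveoff = off;

		memcpy(&h, log + off, sizeof(h));
		off += process_record(&h, log + off + sizeof(h));

		/* the handler must have consumed exactly h.len bytes */
		if (h.len == 0 || off != saveoff + h.len || off > head) {
			printf("corrupted records\n");
			return -1;
		}
	}
	return 0;
}
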
2909 1.2 simonb
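/*
 * Disabled consistency check: walk the journal between tail and head
 * and compare each journalled block against what is currently on the
 * file system device, reporting mismatches.  Callers would need to
 * supply the log head and tail (as for wapbl_replay_process()) before
 * this could be re-enabled.
 */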
2910 1.13 joerg #if 0
2911 1.2 simonb int
2912 1.2 simonb wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp, off_t head, off_t tail)
2913 1.2 simonb {
2914 1.2 simonb off_t off;
2915 1.2 simonb int mismatchcnt = 0;
2916 1.14 joerg int logblklen = 1 << wr->wr_log_dev_bshift;
2917 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
2918 1.51 para void *scratch1 = wapbl_alloc(MAXBSIZE);
2919 1.51 para void *scratch2 = wapbl_alloc(MAXBSIZE);
2920 1.2 simonb int error = 0;
2921 1.2 simonb
2922 1.2 simonb KDASSERT(wapbl_replay_isopen(wr));
2923 1.2 simonb
2924 1.2 simonb 	off = tail;
2925 1.2 simonb 	while (off != head) {
2926 1.2 simonb struct wapbl_wc_null *wcn;
2927 1.2 simonb #ifdef DEBUG
2928 1.2 simonb off_t saveoff = off;
2929 1.2 simonb #endif
2930 1.2 simonb error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2931 1.2 simonb if (error)
2932 1.2 simonb goto out;
2933 1.2 simonb wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2934 1.2 simonb switch (wcn->wc_type) {
2935 1.2 simonb case WAPBL_WC_BLOCKS:
2936 1.2 simonb {
2937 1.2 simonb struct wapbl_wc_blocklist *wc =
2938 1.2 simonb (struct wapbl_wc_blocklist *)wr->wr_scratch;
2939 1.2 simonb int i;
2940 1.2 simonb for (i = 0; i < wc->wc_blkcount; i++) {
2941 1.2 simonb int foundcnt = 0;
2942 1.2 simonb int dirtycnt = 0;
2943 1.2 simonb int j, n;
2944 1.2 simonb /*
2945 1.2 simonb 				 * Check each physical block against the
2946 1.2 simonb 				 * hashtable independently.
2947 1.2 simonb */
2948 1.2 simonb 				n = wc->wc_blocks[i].wc_dlen >>
2949 1.2 simonb 				    wr->wr_fs_dev_bshift;
2950 1.2 simonb for (j = 0; j < n; j++) {
2951 1.2 simonb struct wapbl_blk *wb =
2952 1.2 simonb wapbl_blkhash_get(wr,
2953 1.62 mlelstv wapbl_block_daddr(wc, i, j, fsblklen));
2954 1.2 simonb if (wb && (wb->wb_off == off)) {
2955 1.2 simonb foundcnt++;
2956 1.2 simonb error =
2957 1.2 simonb wapbl_circ_read(wr,
2958 1.2 simonb scratch1, fsblklen,
2959 1.2 simonb &off);
2960 1.2 simonb if (error)
2961 1.2 simonb goto out;
2962 1.2 simonb error =
2963 1.2 simonb wapbl_read(scratch2,
2964 1.2 simonb fsblklen, fsdevvp,
2965 1.2 simonb wb->wb_blk);
2966 1.2 simonb if (error)
2967 1.2 simonb goto out;
2968 1.2 simonb if (memcmp(scratch1,
2969 1.2 simonb scratch2,
2970 1.2 simonb fsblklen)) {
2971 1.2 simonb printf(
2972 1.2 simonb "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2973 1.2 simonb wb->wb_blk, (intmax_t)off);
2974 1.2 simonb dirtycnt++;
2975 1.2 simonb mismatchcnt++;
2976 1.2 simonb }
2977 1.2 simonb } else {
2978 1.2 simonb wapbl_circ_advance(wr,
2979 1.2 simonb fsblklen, &off);
2980 1.2 simonb }
2981 1.2 simonb }
2982 1.2 simonb #if 0
2983 1.2 simonb /*
2984 1.2 simonb * If all of the blocks in an entry
2985 1.2 simonb * are clean, then remove all of its
2986 1.2 simonb * blocks from the hashtable since they
2987 1.2 simonb * never will need replay.
2988 1.2 simonb */
2989 1.2 simonb if ((foundcnt != 0) &&
2990 1.2 simonb (dirtycnt == 0)) {
2991 1.2 simonb off = saveoff;
2992 1.2 simonb wapbl_circ_advance(wr,
2993 1.2 simonb logblklen, &off);
2994 1.2 simonb for (j = 0; j < n; j++) {
2995 1.2 simonb struct wapbl_blk *wb =
2996 1.2 simonb wapbl_blkhash_get(wr,
2997 1.62 mlelstv wapbl_block_daddr(wc, i, j, fsblklen));
2998 1.2 simonb if (wb &&
2999 1.2 simonb (wb->wb_off == off)) {
3000 1.2 simonb wapbl_blkhash_rem(wr, wb->wb_blk);
3001 1.2 simonb }
3002 1.2 simonb wapbl_circ_advance(wr,
3003 1.2 simonb fsblklen, &off);
3004 1.2 simonb }
3005 1.2 simonb }
3006 1.2 simonb #endif
3007 1.2 simonb }
3008 1.2 simonb }
3009 1.2 simonb break;
3010 1.2 simonb case WAPBL_WC_REVOCATIONS:
3011 1.2 simonb case WAPBL_WC_INODES:
3012 1.2 simonb break;
3013 1.2 simonb default:
3014 1.2 simonb KASSERT(0);
3015 1.2 simonb }
3016 1.2 simonb #ifdef DEBUG
3017 1.2 simonb wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3018 1.2 simonb KASSERT(off == saveoff);
3019 1.2 simonb #endif
3020 1.2 simonb }
3021 1.2 simonb out:
3022 1.18 yamt wapbl_free(scratch1, MAXBSIZE);
3023 1.18 yamt wapbl_free(scratch2, MAXBSIZE);
3024 1.2 simonb if (!error && mismatchcnt)
3025 1.2 simonb error = EFTYPE;
3026 1.2 simonb return error;
3027 1.2 simonb }
3028 1.2 simonb #endif
3029 1.2 simonb
3030 1.2 simonb int
3031 1.2 simonb wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
3032 1.2 simonb {
3033 1.9 joerg struct wapbl_blk *wb;
3034 1.9 joerg size_t i;
3035 1.2 simonb off_t off;
3036 1.9 joerg void *scratch;
3037 1.2 simonb int error = 0;
3038 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3039 1.2 simonb
3040 1.2 simonb KDASSERT(wapbl_replay_isopen(wr));
3041 1.2 simonb
3042 1.51 para scratch = wapbl_alloc(MAXBSIZE);
3043 1.2 simonb
3044 1.37 drochner for (i = 0; i <= wr->wr_blkhashmask; ++i) {
3045 1.9 joerg LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
3046 1.9 joerg off = wb->wb_off;
3047 1.9 joerg error = wapbl_circ_read(wr, scratch, fsblklen, &off);
3048 1.9 joerg if (error)
3049 1.9 joerg break;
3050 1.9 joerg error = wapbl_write(scratch, fsblklen, fsdevvp,
3051 1.9 joerg wb->wb_blk);
3052 1.9 joerg if (error)
3053 1.9 joerg break;
3054 1.2 simonb }
3055 1.2 simonb }
3056 1.9 joerg
3057 1.18 yamt wapbl_free(scratch, MAXBSIZE);
3058 1.2 simonb return error;
3059 1.2 simonb }
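wapbl_replay_write() pushes every block recorded in the hash back to its home location on the file system device. The sketch below shows one way a file system's mount path might drive it once wapbl_replay_start() has populated the replay object; the function name, the read-only policy, and the error handling are illustrative assumptions, not taken from this file.

#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>
#include <sys/wapbl_replay.h>

/*
 * Hedged sketch of consuming a populated replay object at mount time.
 * The policy shown (replay on read/write mounts, keep the object open
 * on read-only mounts) is an assumption for illustration.
 */
static int
example_mount_replay(struct wapbl_replay *wr, struct vnode *fsdevvp,
    int rdonly)
{
	int error;

	if (wr == NULL || !wapbl_replay_isopen(wr))
		return 0;	/* nothing journalled to recover */

	if (rdonly)
		return 0;	/* keep wr open; see the read sketch below */

	/* write every journalled block back to its final location */
	error = wapbl_replay_write(wr, fsdevvp);
	if (error)
		return error;

	wapbl_replay_stop(wr);
	wapbl_replay_free(wr);
	return 0;
}
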
3060 1.2 simonb
3061 1.2 simonb int
3062 1.6 joerg wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
3063 1.6 joerg {
3064 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3065 1.6 joerg
3066 1.6 joerg KDASSERT(wapbl_replay_isopen(wr));
3067 1.6 joerg KASSERT((len % fsblklen) == 0);
3068 1.6 joerg
3069 1.6 joerg while (len != 0) {
3070 1.6 joerg struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3071 1.6 joerg if (wb)
3072 1.6 joerg return 1;
3073 1.6 joerg 		len -= fsblklen;
3073 1.6 joerg 		blk++;
3074 1.6 joerg 	}
3075 1.6 joerg return 0;
3076 1.6 joerg }
3077 1.6 joerg
3078 1.6 joerg int
3079 1.2 simonb wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
3080 1.2 simonb {
3081 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3082 1.2 simonb
3083 1.2 simonb KDASSERT(wapbl_replay_isopen(wr));
3084 1.2 simonb
3085 1.2 simonb KASSERT((len % fsblklen) == 0);
3086 1.2 simonb
3087 1.2 simonb while (len != 0) {
3088 1.2 simonb struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3089 1.2 simonb if (wb) {
3090 1.2 simonb off_t off = wb->wb_off;
3091 1.2 simonb int error;
3092 1.2 simonb error = wapbl_circ_read(wr, data, fsblklen, &off);
3093 1.2 simonb if (error)
3094 1.2 simonb return error;
3095 1.2 simonb }
3096 1.2 simonb data = (uint8_t *)data + fsblklen;
3097 1.2 simonb len -= fsblklen;
3098 1.2 simonb blk++;
3099 1.2 simonb }
3100 1.2 simonb return 0;
3101 1.2 simonb }
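While a replay object is kept open, for example on a read-only mount where nothing may be written back, wapbl_replay_can_read() and wapbl_replay_read() let the file system substitute the journalled contents for stale on-disk blocks. Below is a hedged sketch of that overlay; fs_rawread() is a hypothetical stand-in for the file system's own device read path, and the surrounding logic is illustrative rather than taken from this file.

#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>
#include <sys/wapbl_replay.h>

/* hypothetical raw device read, used only for this illustration */
static int fs_rawread(struct vnode *, void *, daddr_t, long);

/*
 * Hedged sketch: read a block normally, then overlay any newer
 * contents that the journal recorded for it while replay state is
 * still open.
 */
static int
example_read_block(struct wapbl_replay *wr, struct vnode *devvp,
    void *buf, daddr_t blk, long len)
{
	int error;

	error = fs_rawread(devvp, buf, blk, len);	/* hypothetical */
	if (error)
		return error;

	if (wr != NULL && wapbl_replay_isopen(wr) &&
	    wapbl_replay_can_read(wr, blk, len))
		error = wapbl_replay_read(wr, buf, blk, len);

	return error;
}
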
3102 1.35 pooka
3103 1.36 pooka #ifdef _KERNEL
3104 1.64 pgoyette
3105 1.35 pooka MODULE(MODULE_CLASS_VFS, wapbl, NULL);
3106 1.35 pooka
3107 1.35 pooka static int
3108 1.35 pooka wapbl_modcmd(modcmd_t cmd, void *arg)
3109 1.35 pooka {
3110 1.35 pooka
3111 1.35 pooka switch (cmd) {
3112 1.35 pooka case MODULE_CMD_INIT:
3113 1.39 christos wapbl_init();
3114 1.35 pooka return 0;
3115 1.35 pooka case MODULE_CMD_FINI:
3116 1.74 riastrad return wapbl_fini();
3117 1.35 pooka default:
3118 1.35 pooka return ENOTTY;
3119 1.35 pooka }
3120 1.35 pooka }
3121 1.36 pooka #endif /* _KERNEL */
3122