vfs_wapbl.c revision 1.25.2.3 1 1.25.2.3 yamt /* $NetBSD: vfs_wapbl.c,v 1.25.2.3 2009/07/18 14:53:23 yamt Exp $ */
2 1.25.2.2 yamt
3 1.25.2.2 yamt /*-
4 1.25.2.2 yamt * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
5 1.25.2.2 yamt * All rights reserved.
6 1.25.2.2 yamt *
7 1.25.2.2 yamt * This code is derived from software contributed to The NetBSD Foundation
8 1.25.2.2 yamt * by Wasabi Systems, Inc.
9 1.25.2.2 yamt *
10 1.25.2.2 yamt * Redistribution and use in source and binary forms, with or without
11 1.25.2.2 yamt * modification, are permitted provided that the following conditions
12 1.25.2.2 yamt * are met:
13 1.25.2.2 yamt * 1. Redistributions of source code must retain the above copyright
14 1.25.2.2 yamt * notice, this list of conditions and the following disclaimer.
15 1.25.2.2 yamt * 2. Redistributions in binary form must reproduce the above copyright
16 1.25.2.2 yamt * notice, this list of conditions and the following disclaimer in the
17 1.25.2.2 yamt * documentation and/or other materials provided with the distribution.
18 1.25.2.2 yamt *
19 1.25.2.2 yamt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.25.2.2 yamt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.25.2.2 yamt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.25.2.2 yamt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.25.2.2 yamt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.25.2.2 yamt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.25.2.2 yamt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.25.2.2 yamt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.25.2.2 yamt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.25.2.2 yamt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.25.2.2 yamt * POSSIBILITY OF SUCH DAMAGE.
30 1.25.2.2 yamt */
31 1.25.2.2 yamt
32 1.25.2.2 yamt /*
33 1.25.2.2 yamt * This implements file system independent write ahead filesystem logging.
34 1.25.2.2 yamt */
35 1.25.2.2 yamt
36 1.25.2.2 yamt #define WAPBL_INTERNAL
37 1.25.2.2 yamt
38 1.25.2.2 yamt #include <sys/cdefs.h>
39 1.25.2.3 yamt __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.25.2.3 2009/07/18 14:53:23 yamt Exp $");
40 1.25.2.2 yamt
41 1.25.2.2 yamt #include <sys/param.h>
42 1.25.2.2 yamt
43 1.25.2.2 yamt #ifdef _KERNEL
44 1.25.2.2 yamt #include <sys/param.h>
45 1.25.2.2 yamt #include <sys/namei.h>
46 1.25.2.2 yamt #include <sys/proc.h>
47 1.25.2.2 yamt #include <sys/uio.h>
48 1.25.2.2 yamt #include <sys/vnode.h>
49 1.25.2.2 yamt #include <sys/file.h>
50 1.25.2.2 yamt #include <sys/malloc.h>
51 1.25.2.2 yamt #include <sys/resourcevar.h>
52 1.25.2.2 yamt #include <sys/conf.h>
53 1.25.2.2 yamt #include <sys/mount.h>
54 1.25.2.2 yamt #include <sys/kernel.h>
55 1.25.2.2 yamt #include <sys/kauth.h>
56 1.25.2.2 yamt #include <sys/mutex.h>
57 1.25.2.2 yamt #include <sys/atomic.h>
58 1.25.2.2 yamt #include <sys/wapbl.h>
59 1.25.2.2 yamt #include <sys/wapbl_replay.h>
60 1.25.2.2 yamt
61 1.25.2.2 yamt #include <miscfs/specfs/specdev.h>
62 1.25.2.2 yamt
63 1.25.2.2 yamt #if 0 /* notyet */
64 1.25.2.2 yamt #define wapbl_malloc(s) kmem_alloc((s), KM_SLEEP)
65 1.25.2.2 yamt #define wapbl_free(a, s) kmem_free((a), (s))
66 1.25.2.2 yamt #define wapbl_calloc(n, s) kmem_zalloc((n)*(s), KM_SLEEP)
67 1.25.2.2 yamt #else
68 1.25.2.2 yamt MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
69 1.25.2.2 yamt #define wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
70 1.25.2.2 yamt #define wapbl_free(a, s) free((a), M_WAPBL)
71 1.25.2.2 yamt #define wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO)
72 1.25.2.2 yamt #endif
73 1.25.2.2 yamt
74 1.25.2.2 yamt #else /* !_KERNEL */
75 1.25.2.2 yamt #include <assert.h>
76 1.25.2.2 yamt #include <errno.h>
77 1.25.2.2 yamt #include <stdio.h>
78 1.25.2.2 yamt #include <stdbool.h>
79 1.25.2.2 yamt #include <stdlib.h>
80 1.25.2.2 yamt #include <string.h>
81 1.25.2.2 yamt
82 1.25.2.2 yamt #include <sys/time.h>
83 1.25.2.2 yamt #include <sys/wapbl.h>
84 1.25.2.2 yamt #include <sys/wapbl_replay.h>
85 1.25.2.2 yamt
86 1.25.2.2 yamt #define KDASSERT(x) assert(x)
87 1.25.2.2 yamt #define KASSERT(x) assert(x)
88 1.25.2.2 yamt #define wapbl_malloc(s) malloc(s)
89 1.25.2.2 yamt #define wapbl_free(a, s) free(a)
90 1.25.2.2 yamt #define wapbl_calloc(n, s) calloc((n), (s))
91 1.25.2.2 yamt
92 1.25.2.2 yamt #endif /* !_KERNEL */
93 1.25.2.2 yamt
94 1.25.2.2 yamt /*
95 1.25.2.2 yamt * INTERNAL DATA STRUCTURES
96 1.25.2.2 yamt */
97 1.25.2.2 yamt
98 1.25.2.2 yamt /*
99 1.25.2.2 yamt * This structure holds per-mount log information.
100 1.25.2.2 yamt *
101 1.25.2.2 yamt * Legend: a = atomic access only
102 1.25.2.2 yamt * r = read-only after init
103 1.25.2.2 yamt * l = rwlock held
104 1.25.2.2 yamt * m = mutex held
105 1.25.2.2 yamt * u = unlocked access ok
106 1.25.2.2 yamt * b = bufcache_lock held
107 1.25.2.2 yamt */
108 1.25.2.2 yamt struct wapbl {
109 1.25.2.2 yamt struct vnode *wl_logvp; /* r: log here */
110 1.25.2.2 yamt struct vnode *wl_devvp; /* r: log on this device */
111 1.25.2.2 yamt struct mount *wl_mount; /* r: mountpoint wl is associated with */
112 1.25.2.2 yamt daddr_t wl_logpbn; /* r: Physical block number of start of log */
113 1.25.2.2 yamt int wl_log_dev_bshift; /* r: logarithm of device block size of log
114 1.25.2.2 yamt device */
115 1.25.2.2 yamt int wl_fs_dev_bshift; /* r: logarithm of device block size of
116 1.25.2.2 yamt filesystem device */
117 1.25.2.2 yamt
118 1.25.2.2 yamt unsigned wl_lock_count; /* m: Count of transactions in progress */
119 1.25.2.2 yamt
120 1.25.2.2 yamt size_t wl_circ_size; /* r: Number of bytes in buffer of log */
121 1.25.2.2 yamt size_t wl_circ_off; /* r: Number of bytes reserved at start */
122 1.25.2.2 yamt
123 1.25.2.2 yamt size_t wl_bufcount_max; /* r: Number of buffers reserved for log */
124 1.25.2.2 yamt size_t wl_bufbytes_max; /* r: Number of buf bytes reserved for log */
125 1.25.2.2 yamt
126 1.25.2.2 yamt off_t wl_head; /* l: Byte offset of log head */
127 1.25.2.2 yamt off_t wl_tail; /* l: Byte offset of log tail */
128 1.25.2.2 yamt /*
129 1.25.2.2 yamt * head == tail == 0 means log is empty
130 1.25.2.2 yamt * head == tail != 0 means log is full
131 1.25.2.2 yamt * see assertions in wapbl_advance() for other boundary conditions.
132 1.25.2.2 yamt * only truncate moves the tail, except when flush sets it to
133 1.25.2.2 yamt * wl_header_size only flush moves the head, except when truncate
134 1.25.2.2 yamt * sets it to 0.
135 1.25.2.2 yamt */
136 1.25.2.2 yamt
137 1.25.2.2 yamt struct wapbl_wc_header *wl_wc_header; /* l */
138 1.25.2.2 yamt void *wl_wc_scratch; /* l: scratch space (XXX: por que?!?) */
139 1.25.2.2 yamt
140 1.25.2.2 yamt kmutex_t wl_mtx; /* u: short-term lock */
141 1.25.2.2 yamt krwlock_t wl_rwlock; /* u: File system transaction lock */
142 1.25.2.2 yamt
143 1.25.2.2 yamt /*
144 1.25.2.2 yamt * Must be held while accessing
145 1.25.2.2 yamt * wl_count or wl_bufs or head or tail
146 1.25.2.2 yamt */
147 1.25.2.2 yamt
148 1.25.2.2 yamt /*
149 1.25.2.2 yamt * Callback called from within the flush routine to flush any extra
150 1.25.2.2 yamt * bits. Note that flush may be skipped without calling this if
151 1.25.2.2 yamt * there are no outstanding buffers in the transaction.
152 1.25.2.2 yamt */
153 1.25.2.2 yamt #if _KERNEL
154 1.25.2.2 yamt wapbl_flush_fn_t wl_flush; /* r */
155 1.25.2.2 yamt wapbl_flush_fn_t wl_flush_abort;/* r */
156 1.25.2.2 yamt #endif
157 1.25.2.2 yamt
158 1.25.2.2 yamt size_t wl_bufbytes; /* m: Byte count of pages in wl_bufs */
159 1.25.2.2 yamt size_t wl_bufcount; /* m: Count of buffers in wl_bufs */
160 1.25.2.2 yamt size_t wl_bcount; /* m: Total bcount of wl_bufs */
161 1.25.2.2 yamt
162 1.25.2.2 yamt LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */
163 1.25.2.2 yamt
164 1.25.2.2 yamt kcondvar_t wl_reclaimable_cv; /* m (obviously) */
165 1.25.2.2 yamt size_t wl_reclaimable_bytes; /* m: Amount of space available for
166 1.25.2.2 yamt reclamation by truncate */
167 1.25.2.2 yamt int wl_error_count; /* m: # of wl_entries with errors */
168 1.25.2.2 yamt size_t wl_reserved_bytes; /* never truncate log smaller than this */
169 1.25.2.2 yamt
170 1.25.2.2 yamt #ifdef WAPBL_DEBUG_BUFBYTES
171 1.25.2.2 yamt size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
172 1.25.2.2 yamt #endif
173 1.25.2.2 yamt
174 1.25.2.2 yamt daddr_t *wl_deallocblks;/* l: address of block */
175 1.25.2.2 yamt int *wl_dealloclens; /* l: size of block */
176 1.25.2.2 yamt int wl_dealloccnt; /* l: total count */
177 1.25.2.2 yamt int wl_dealloclim; /* l: max count */
178 1.25.2.2 yamt
179 1.25.2.2 yamt /* hashtable of inode numbers for allocated but unlinked inodes */
180 1.25.2.2 yamt /* synch ??? */
181 1.25.2.2 yamt LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
182 1.25.2.2 yamt u_long wl_inohashmask;
183 1.25.2.2 yamt int wl_inohashcnt;
184 1.25.2.2 yamt
185 1.25.2.2 yamt SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
186 1.25.2.2 yamt accounting */
187 1.25.2.2 yamt };
188 1.25.2.2 yamt
189 1.25.2.2 yamt #ifdef WAPBL_DEBUG_PRINT
190 1.25.2.2 yamt int wapbl_debug_print = WAPBL_DEBUG_PRINT;
191 1.25.2.2 yamt #endif
192 1.25.2.2 yamt
193 1.25.2.2 yamt /****************************************************************/
194 1.25.2.2 yamt #ifdef _KERNEL
195 1.25.2.2 yamt
196 1.25.2.2 yamt #ifdef WAPBL_DEBUG
197 1.25.2.2 yamt struct wapbl *wapbl_debug_wl;
198 1.25.2.2 yamt #endif
199 1.25.2.2 yamt
200 1.25.2.2 yamt static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
201 1.25.2.2 yamt static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
202 1.25.2.2 yamt static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
203 1.25.2.2 yamt static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
204 1.25.2.2 yamt #endif /* _KERNEL */
205 1.25.2.2 yamt
206 1.25.2.2 yamt static int wapbl_replay_process(struct wapbl_replay *wr, off_t, off_t);
207 1.25.2.2 yamt
208 1.25.2.2 yamt static __inline size_t wapbl_space_free(size_t avail, off_t head,
209 1.25.2.2 yamt off_t tail);
210 1.25.2.2 yamt static __inline size_t wapbl_space_used(size_t avail, off_t head,
211 1.25.2.2 yamt off_t tail);
212 1.25.2.2 yamt
213 1.25.2.2 yamt #ifdef _KERNEL
214 1.25.2.2 yamt
/* Default bucket count for the per-log unlinked-inode hash table. */
#define	WAPBL_INODETRK_SIZE 83
/*
 * Shared pool for struct wapbl_ino; refcounted so the pool presumably
 * lives while any log uses it — see wapbl_inodetrk_init/free.
 */
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
/* One tracked inode: allocated but unlinked, chained in wl_inohash. */
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;	/* hash-chain linkage */
	ino_t wi_ino;			/* inode number */
	mode_t wi_mode;			/* inode mode */
};
223 1.25.2.2 yamt
224 1.25.2.2 yamt static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
225 1.25.2.2 yamt static void wapbl_inodetrk_free(struct wapbl *wl);
226 1.25.2.2 yamt static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);
227 1.25.2.2 yamt
228 1.25.2.2 yamt static size_t wapbl_transaction_len(struct wapbl *wl);
229 1.25.2.2 yamt static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);
230 1.25.2.2 yamt
231 1.25.2.2 yamt #if 0
232 1.25.2.2 yamt int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
233 1.25.2.2 yamt #endif
234 1.25.2.2 yamt
235 1.25.2.2 yamt static int wapbl_replay_isopen1(struct wapbl_replay *);
236 1.25.2.2 yamt
/*
 * This is useful for debugging.  If set, the log will
 * only be truncated when necessary.
 */
int wapbl_lazy_truncate = 0;

/*
 * Operations vector through which file systems reach the journalling
 * code (struct wapbl_ops is declared in sys/wapbl.h).
 */
struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_can_read = wapbl_replay_can_read,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};
258 1.25.2.2 yamt
/*
 * Module initialization: attach the malloc type used by the
 * wapbl_malloc/wapbl_free wrappers defined above.
 */
void
wapbl_init(void)
{

	malloc_type_attach(M_WAPBL);
}
265 1.25.2.2 yamt
/*
 * Carry the unlinked-but-allocated inode list of a replayed log (wr)
 * over into the newly opened log (wl): continue the commit generation
 * sequence, re-register every inode, and write the inode records into
 * the new log.
 *
 * Returns 0 on success or the error from wapbl_write_inodes().
 */
static int
wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
{
	int error, i;

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));

	/*
	 * It's only valid to reuse the replay log if it's
	 * the same as the new log we just opened.
	 */
	KDASSERT(!wapbl_replay_isopen(wr));
	KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
	KASSERT(wl->wl_logpbn == wr->wr_logpbn);
	KASSERT(wl->wl_circ_size == wr->wr_circ_size);
	KASSERT(wl->wl_circ_off == wr->wr_circ_off);
	KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
	KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);

	/* Continue the generation sequence of the replayed log. */
	wl->wl_wc_header->wc_generation = wr->wr_generation + 1;

	for (i = 0; i < wr->wr_inodescnt; i++)
		wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
		    wr->wr_inodes[i].wr_imode);

	/* Make sure new transaction won't overwrite old inodes list */
	KDASSERT(wapbl_transaction_len(wl) <=
	    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
	    wr->wr_inodestail));

	wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
	wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
	    wapbl_transaction_len(wl);

	error = wapbl_write_inodes(wl, &wl->wl_head);
	if (error)
		return error;

	/* wapbl_write_inodes() advanced the head, so the log is non-empty. */
	KASSERT(wl->wl_head != wl->wl_tail);
	KASSERT(wl->wl_head != 0);

	return 0;
}
310 1.25.2.2 yamt
/*
 * Open a log occupying "count" blocks of size "blksize" in vnode "vp",
 * starting at logical block "off".  The underlying device and physical
 * start block are located with VOP_BMAP().
 *
 * If "wr" carries allocated-but-unlinked inodes from a replayed log,
 * they are preserved in the new log (wapbl_start_flush_inodes()).
 * "flushfn"/"flushabortfn" are file-system callbacks stored for use by
 * the flush path.
 *
 * On success stores the new handle in *wlp, writes an initial commit
 * header to disk, and returns 0; otherwise returns an errno.
 */
int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = DEV_BSHIFT;
	int fs_dev_bshift = DEV_BSHIFT;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
			("wapbl: log device's block size cannot be larger "
			 "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	/* The file system block size must be a multiple of DEV_BSIZE. */
	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return ENOSPC;
	}

	/* Resolve log start to the device vnode and a physical block. */
	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	LIST_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of log_dev_bshift */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;

	/*
	 * wl_bufbytes_max limits the size of the in memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
	 *   it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
	 * Therefore it must be multiple of the least common multiple of those
	 * three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (nbuf / 2) * 1024;

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);

	wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
	    wl->wl_dealloclim);
	wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
	    wl->wl_dealloclim);

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1 << wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_malloc(len);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		error = wapbl_start_flush_inodes(wl, wr);
		if (error)
			goto errout;
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
 errout:
	/*
	 * Undo everything above: discard the (empty) transaction, then
	 * free in roughly reverse order of allocation.  Note the scratch
	 * buffer is freed using the header's wc_len before the header
	 * itself is freed.
	 */
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_deallocblks,
	    sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim);
	wapbl_free(wl->wl_dealloclens,
	    sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim);
	wapbl_inodetrk_free(wl);
	wapbl_free(wl, sizeof(*wl));

	return error;
}
481 1.25.2.2 yamt
/*
 * Like wapbl_flush, only discards the transaction
 * completely.
 *
 * Takes wl_rwlock as writer, runs the file system's flush callback on
 * the pending deallocations, then empties the unlinked-inode hash, the
 * buffer list, and the on-disk-entry queue, asserting everything is
 * accounted for at the end.
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

#ifdef WAPBL_DEBUG_PRINT
	{
		struct wapbl_entry *we;
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	/* Empty every bucket of the unlinked-inode hash table. */
	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 * (bufcache_lock is taken before wl_mtx, and wl_mtx is dropped
	 * around brelsel() since brelse re-enters the transaction)
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we, sizeof(*we));
		}
	}

	/* Discard list of deallocs */
	wl->wl_dealloccnt = 0;
	/* XXX should we clear wl_reserved_bytes? */

	/* Everything tracked by the transaction must be gone now. */
	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	rw_exit(&wl->wl_rwlock);
}
615 1.25.2.2 yamt
616 1.25.2.2 yamt int
617 1.25.2.2 yamt wapbl_stop(struct wapbl *wl, int force)
618 1.25.2.2 yamt {
619 1.25.2.2 yamt struct vnode *vp;
620 1.25.2.2 yamt int error;
621 1.25.2.2 yamt
622 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
623 1.25.2.2 yamt error = wapbl_flush(wl, 1);
624 1.25.2.2 yamt if (error) {
625 1.25.2.2 yamt if (force)
626 1.25.2.2 yamt wapbl_discard(wl);
627 1.25.2.2 yamt else
628 1.25.2.2 yamt return error;
629 1.25.2.2 yamt }
630 1.25.2.2 yamt
631 1.25.2.2 yamt /* Unlinked inodes persist after a flush */
632 1.25.2.2 yamt if (wl->wl_inohashcnt) {
633 1.25.2.2 yamt if (force) {
634 1.25.2.2 yamt wapbl_discard(wl);
635 1.25.2.2 yamt } else {
636 1.25.2.2 yamt return EBUSY;
637 1.25.2.2 yamt }
638 1.25.2.2 yamt }
639 1.25.2.2 yamt
640 1.25.2.2 yamt KASSERT(wl->wl_bufbytes == 0);
641 1.25.2.2 yamt KASSERT(wl->wl_bcount == 0);
642 1.25.2.2 yamt KASSERT(wl->wl_bufcount == 0);
643 1.25.2.2 yamt KASSERT(LIST_EMPTY(&wl->wl_bufs));
644 1.25.2.2 yamt KASSERT(wl->wl_dealloccnt == 0);
645 1.25.2.2 yamt KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
646 1.25.2.2 yamt KASSERT(wl->wl_inohashcnt == 0);
647 1.25.2.2 yamt
648 1.25.2.2 yamt vp = wl->wl_logvp;
649 1.25.2.2 yamt
650 1.25.2.2 yamt wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
651 1.25.2.2 yamt wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
652 1.25.2.2 yamt wapbl_free(wl->wl_deallocblks,
653 1.25.2.2 yamt sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim);
654 1.25.2.2 yamt wapbl_free(wl->wl_dealloclens,
655 1.25.2.2 yamt sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim);
656 1.25.2.2 yamt wapbl_inodetrk_free(wl);
657 1.25.2.2 yamt
658 1.25.2.2 yamt cv_destroy(&wl->wl_reclaimable_cv);
659 1.25.2.2 yamt mutex_destroy(&wl->wl_mtx);
660 1.25.2.2 yamt rw_destroy(&wl->wl_rwlock);
661 1.25.2.2 yamt wapbl_free(wl, sizeof(*wl));
662 1.25.2.2 yamt
663 1.25.2.2 yamt return 0;
664 1.25.2.2 yamt }
665 1.25.2.2 yamt
/*
 * Perform one synchronous I/O ("flags" is exactly B_READ or B_WRITE) of
 * "len" bytes between "data" and physical block "pbn" on block device
 * "devvp", using a transient iobuf.  Returns the biowait() error.
 */
static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
	struct buf *bp;
	int error;

	KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
	KASSERT(devvp->v_type == VBLK);

	/* Charge the I/O to the current process's rusage counters. */
	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(&devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(&devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY; /* silly & dubious */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	/* Wait for completion and release the iobuf before reporting. */
	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%x failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}
714 1.25.2.2 yamt
/*
 * Synchronously write len bytes from data to physical block pbn on
 * the log device.  Thin wrapper around wapbl_doio().
 */
int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}
721 1.25.2.2 yamt
/*
 * Synchronously read len bytes into data from physical block pbn on
 * the log device.  Thin wrapper around wapbl_doio().
 */
int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}
728 1.25.2.2 yamt
729 1.25.2.2 yamt /*
730 1.25.2.2 yamt * Off is byte offset returns new offset for next write
731 1.25.2.2 yamt * handles log wraparound
732 1.25.2.2 yamt */
733 1.25.2.2 yamt static int
734 1.25.2.2 yamt wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
735 1.25.2.2 yamt {
736 1.25.2.2 yamt size_t slen;
737 1.25.2.2 yamt off_t off = *offp;
738 1.25.2.2 yamt int error;
739 1.25.2.2 yamt
740 1.25.2.2 yamt KDASSERT(((len >> wl->wl_log_dev_bshift) <<
741 1.25.2.2 yamt wl->wl_log_dev_bshift) == len);
742 1.25.2.2 yamt
743 1.25.2.2 yamt if (off < wl->wl_circ_off)
744 1.25.2.2 yamt off = wl->wl_circ_off;
745 1.25.2.2 yamt slen = wl->wl_circ_off + wl->wl_circ_size - off;
746 1.25.2.2 yamt if (slen < len) {
747 1.25.2.2 yamt error = wapbl_write(data, slen, wl->wl_devvp,
748 1.25.2.2 yamt wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
749 1.25.2.2 yamt if (error)
750 1.25.2.2 yamt return error;
751 1.25.2.2 yamt data = (uint8_t *)data + slen;
752 1.25.2.2 yamt len -= slen;
753 1.25.2.2 yamt off = wl->wl_circ_off;
754 1.25.2.2 yamt }
755 1.25.2.2 yamt error = wapbl_write(data, len, wl->wl_devvp,
756 1.25.2.2 yamt wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
757 1.25.2.2 yamt if (error)
758 1.25.2.2 yamt return error;
759 1.25.2.2 yamt off += len;
760 1.25.2.2 yamt if (off >= wl->wl_circ_off + wl->wl_circ_size)
761 1.25.2.2 yamt off = wl->wl_circ_off;
762 1.25.2.2 yamt *offp = off;
763 1.25.2.2 yamt return 0;
764 1.25.2.2 yamt }
765 1.25.2.2 yamt
766 1.25.2.2 yamt /****************************************************************/
767 1.25.2.2 yamt
768 1.25.2.2 yamt int
769 1.25.2.2 yamt wapbl_begin(struct wapbl *wl, const char *file, int line)
770 1.25.2.2 yamt {
771 1.25.2.2 yamt int doflush;
772 1.25.2.2 yamt unsigned lockcount;
773 1.25.2.2 yamt
774 1.25.2.2 yamt KDASSERT(wl);
775 1.25.2.2 yamt
776 1.25.2.2 yamt /*
777 1.25.2.2 yamt * XXX this needs to be made much more sophisticated.
778 1.25.2.2 yamt * perhaps each wapbl_begin could reserve a specified
779 1.25.2.2 yamt * number of buffers and bytes.
780 1.25.2.2 yamt */
781 1.25.2.2 yamt mutex_enter(&wl->wl_mtx);
782 1.25.2.2 yamt lockcount = wl->wl_lock_count;
783 1.25.2.2 yamt doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
784 1.25.2.2 yamt wl->wl_bufbytes_max / 2) ||
785 1.25.2.2 yamt ((wl->wl_bufcount + (lockcount * 10)) >
786 1.25.2.2 yamt wl->wl_bufcount_max / 2) ||
787 1.25.2.2 yamt (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
788 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
789 1.25.2.2 yamt
790 1.25.2.2 yamt if (doflush) {
791 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
792 1.25.2.2 yamt ("force flush lockcnt=%d bufbytes=%zu "
793 1.25.2.2 yamt "(max=%zu) bufcount=%zu (max=%zu)\n",
794 1.25.2.2 yamt lockcount, wl->wl_bufbytes,
795 1.25.2.2 yamt wl->wl_bufbytes_max, wl->wl_bufcount,
796 1.25.2.2 yamt wl->wl_bufcount_max));
797 1.25.2.2 yamt }
798 1.25.2.2 yamt
799 1.25.2.2 yamt if (doflush) {
800 1.25.2.2 yamt int error = wapbl_flush(wl, 0);
801 1.25.2.2 yamt if (error)
802 1.25.2.2 yamt return error;
803 1.25.2.2 yamt }
804 1.25.2.2 yamt
805 1.25.2.2 yamt rw_enter(&wl->wl_rwlock, RW_READER);
806 1.25.2.2 yamt mutex_enter(&wl->wl_mtx);
807 1.25.2.2 yamt wl->wl_lock_count++;
808 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
809 1.25.2.2 yamt
810 1.25.2.2 yamt #if defined(WAPBL_DEBUG_PRINT)
811 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
812 1.25.2.2 yamt ("wapbl_begin thread %d.%d with bufcount=%zu "
813 1.25.2.2 yamt "bufbytes=%zu bcount=%zu at %s:%d\n",
814 1.25.2.2 yamt curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
815 1.25.2.2 yamt wl->wl_bufbytes, wl->wl_bcount, file, line));
816 1.25.2.2 yamt #endif
817 1.25.2.2 yamt
818 1.25.2.2 yamt return 0;
819 1.25.2.2 yamt }
820 1.25.2.2 yamt
/*
 * End a transaction started by wapbl_begin(): drop the wl_lock_count
 * reference and release the shared hold on wl_rwlock, allowing a
 * pending wapbl_flush() (which takes the lock as writer) to proceed.
 */
void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	     "bufbytes=%zu bcount=%zu\n",
	     curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	     wl->wl_bufbytes, wl->wl_bcount));
#endif

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}
840 1.25.2.2 yamt
/*
 * Add a busy metadata buffer to the in-progress transaction: account
 * its size in wl_bufbytes/wl_bcount/wl_bufcount, put it on wl_bufs,
 * and mark it B_LOCKED.  A buf that is already B_LOCKED is being
 * re-added; it is only moved to the head of the list, with no
 * re-accounting.
 */
void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		/* Already tracked: just move it to the list head. */
		LIST_REMOVE(bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked by dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	/*
	 * NOTE(review): B_LOCKED is set after wl_mtx is dropped —
	 * presumably safe because bp is BC_BUSY (owned by the caller);
	 * confirm.
	 */
	bp->b_flags |= B_LOCKED;
}
885 1.25.2.2 yamt
/*
 * Detach bp from the in-progress transaction: back out its
 * wl_bufbytes/wl_bcount/wl_bufcount accounting, unlink it from
 * wl_bufs, and clear B_LOCKED.  Caller must hold wl_mtx.
 */
static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 *
	 * NOTE(review): BC_NOCACHE is a b_cflags bit elsewhere in this
	 * file (see wapbl_add_buf), but here it is tested against
	 * b_flags — looks wrong; confirm before ever enabling this.
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	    ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	/* All three counters must reach zero together. */
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	LIST_REMOVE(bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}
922 1.25.2.2 yamt
/*
 * Detach bp from the current transaction, taking wl_mtx for the
 * caller.  Called from brelsel() in vfs_bio among other places.
 */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}
932 1.25.2.2 yamt
933 1.25.2.2 yamt void
934 1.25.2.2 yamt wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
935 1.25.2.2 yamt {
936 1.25.2.2 yamt
937 1.25.2.2 yamt KASSERT(bp->b_cflags & BC_BUSY);
938 1.25.2.2 yamt
939 1.25.2.2 yamt /*
940 1.25.2.2 yamt * XXX: why does this depend on B_LOCKED? otherwise the buf
941 1.25.2.2 yamt * is not for a transaction? if so, why is this called in the
942 1.25.2.2 yamt * first place?
943 1.25.2.2 yamt */
944 1.25.2.2 yamt if (bp->b_flags & B_LOCKED) {
945 1.25.2.2 yamt mutex_enter(&wl->wl_mtx);
946 1.25.2.2 yamt wl->wl_bufbytes += bp->b_bufsize - oldsz;
947 1.25.2.2 yamt wl->wl_bcount += bp->b_bcount - oldcnt;
948 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
949 1.25.2.2 yamt }
950 1.25.2.2 yamt }
951 1.25.2.2 yamt
952 1.25.2.2 yamt #endif /* _KERNEL */
953 1.25.2.2 yamt
954 1.25.2.2 yamt /****************************************************************/
955 1.25.2.2 yamt /* Some utility inlines */
956 1.25.2.2 yamt
957 1.25.2.2 yamt /* This is used to advance the pointer at old to new value at old+delta */
958 1.25.2.2 yamt static __inline off_t
959 1.25.2.2 yamt wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
960 1.25.2.2 yamt {
961 1.25.2.2 yamt off_t new;
962 1.25.2.2 yamt
963 1.25.2.2 yamt /* Define acceptable ranges for inputs. */
964 1.25.2.2 yamt KASSERT(delta <= size);
965 1.25.2.2 yamt KASSERT((old == 0) || (old >= off));
966 1.25.2.2 yamt KASSERT(old < (size + off));
967 1.25.2.2 yamt
968 1.25.2.2 yamt if ((old == 0) && (delta != 0))
969 1.25.2.2 yamt new = off + delta;
970 1.25.2.2 yamt else if ((old + delta) < (size + off))
971 1.25.2.2 yamt new = old + delta;
972 1.25.2.2 yamt else
973 1.25.2.2 yamt new = (old + delta) - size;
974 1.25.2.2 yamt
975 1.25.2.2 yamt /* Note some interesting axioms */
976 1.25.2.2 yamt KASSERT((delta != 0) || (new == old));
977 1.25.2.2 yamt KASSERT((delta == 0) || (new != 0));
978 1.25.2.2 yamt KASSERT((delta != (size)) || (new == old));
979 1.25.2.2 yamt
980 1.25.2.2 yamt /* Define acceptable ranges for output. */
981 1.25.2.2 yamt KASSERT((new == 0) || (new >= off));
982 1.25.2.2 yamt KASSERT(new < (size + off));
983 1.25.2.2 yamt return new;
984 1.25.2.2 yamt }
985 1.25.2.2 yamt
986 1.25.2.2 yamt static __inline size_t
987 1.25.2.2 yamt wapbl_space_used(size_t avail, off_t head, off_t tail)
988 1.25.2.2 yamt {
989 1.25.2.2 yamt
990 1.25.2.2 yamt if (tail == 0) {
991 1.25.2.2 yamt KASSERT(head == 0);
992 1.25.2.2 yamt return 0;
993 1.25.2.2 yamt }
994 1.25.2.2 yamt return ((head + (avail - 1) - tail) % avail) + 1;
995 1.25.2.2 yamt }
996 1.25.2.2 yamt
997 1.25.2.2 yamt static __inline size_t
998 1.25.2.2 yamt wapbl_space_free(size_t avail, off_t head, off_t tail)
999 1.25.2.2 yamt {
1000 1.25.2.2 yamt
1001 1.25.2.2 yamt return avail - wapbl_space_used(avail, head, tail);
1002 1.25.2.2 yamt }
1003 1.25.2.2 yamt
1004 1.25.2.2 yamt static __inline void
1005 1.25.2.2 yamt wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
1006 1.25.2.2 yamt off_t *tailp)
1007 1.25.2.2 yamt {
1008 1.25.2.2 yamt off_t head = *headp;
1009 1.25.2.2 yamt off_t tail = *tailp;
1010 1.25.2.2 yamt
1011 1.25.2.2 yamt KASSERT(delta <= wapbl_space_free(size, head, tail));
1012 1.25.2.2 yamt head = wapbl_advance(size, off, head, delta);
1013 1.25.2.2 yamt if ((tail == 0) && (head != 0))
1014 1.25.2.2 yamt tail = off;
1015 1.25.2.2 yamt *headp = head;
1016 1.25.2.2 yamt *tailp = tail;
1017 1.25.2.2 yamt }
1018 1.25.2.2 yamt
1019 1.25.2.2 yamt static __inline void
1020 1.25.2.2 yamt wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
1021 1.25.2.2 yamt off_t *tailp)
1022 1.25.2.2 yamt {
1023 1.25.2.2 yamt off_t head = *headp;
1024 1.25.2.2 yamt off_t tail = *tailp;
1025 1.25.2.2 yamt
1026 1.25.2.2 yamt KASSERT(delta <= wapbl_space_used(size, head, tail));
1027 1.25.2.2 yamt tail = wapbl_advance(size, off, tail, delta);
1028 1.25.2.2 yamt if (head == tail) {
1029 1.25.2.2 yamt head = tail = 0;
1030 1.25.2.2 yamt }
1031 1.25.2.2 yamt *headp = head;
1032 1.25.2.2 yamt *tailp = tail;
1033 1.25.2.2 yamt }
1034 1.25.2.2 yamt
1035 1.25.2.2 yamt #ifdef _KERNEL
1036 1.25.2.2 yamt
1037 1.25.2.2 yamt /****************************************************************/
1038 1.25.2.2 yamt
/*
 * Remove transactions whose buffers are completely flushed to disk.
 * Will block until at least minfree space is available.
 * only intended to be called from inside wapbl_flush and therefore
 * does not protect against commit races with itself or with flush.
 *
 * If waitonly is set, only wait for the space; do not write a new
 * commit record or advance the on-disk tail.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	/*
	 * Wait for wapbl_biodone() to retire enough transactions; it
	 * broadcasts wl_reclaimable_cv as reclaimable bytes grow or
	 * when an I/O error is recorded in wl_error_count.
	 */
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		     "minfree=%zd\n",
		     &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		     minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		/* The loop above exited due to an error, not space. */
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
		wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	if (waitonly)
		return 0;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush. This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	/* Commit is on disk; publish the new head/tail. */
	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}
1133 1.25.2.2 yamt
1134 1.25.2.2 yamt /****************************************************************/
1135 1.25.2.2 yamt
/*
 * I/O completion callback installed on journalled buffers by
 * wapbl_flush().  Drops the buffer's reference on its wapbl_entry
 * and, once entries at the head of wl_entries have no outstanding
 * buffers, reclaims their log space and wakes anyone waiting in
 * wapbl_truncate().
 */
void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;

	/*
	 * Handle possible flushing of buffers after log has been
	 * decomissioned.
	 */
	if (!wl) {
		/* Log is gone: just drop the entry refcount and buf. */
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
		we->we_unsynced_bufbytes -= bp->b_bufsize;
#endif

		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we, sizeof(*we));
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_flags & B_DONE);
	KDASSERT(!(bp->b_flags & B_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_flags & B_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_flags & B_INVAL));
	KDASSERT(!(bp->b_flags & B_NOCACHE));
#endif

	if (bp->b_error) {
#ifdef notyet /* Can't currently handle possible dirty buffer reuse */
		/*
		 * XXXpooka: interfaces not fully updated
		 * Note: this was not enabled in the original patch
		 * against netbsd4 either. I don't know if comment
		 * above is true or not.
		 */

		/*
		 * If an error occurs, report the error and leave the
		 * buffer as a delayed write on the LRU queue.
		 * restarting the write would likely result in
		 * an error spinloop, so let it be done harmlessly
		 * by the syncer.
		 */
		bp->b_flags &= ~(B_DONE);
		simple_unlock(&bp->b_interlock);

		if (we->we_error == 0) {
			mutex_enter(&wl->wl_mtx);
			wl->wl_error_count++;
			mutex_exit(&wl->wl_mtx);
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		we->we_error = bp->b_error;
		bp->b_error = 0;
		brelse(bp);
		return;
#else
		/* For now, just mark the log permanently errored out */

		/* Broadcast so wapbl_truncate() waiters notice the error. */
		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
#endif
	}

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
	we->we_unsynced_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
	wl->wl_unsynced_bufbytes -= bp->b_bufsize;
#endif

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions. If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		       (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			wapbl_free(we, sizeof(*we));
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
	brelse(bp, 0);
}
1264 1.25.2.2 yamt
1265 1.25.2.2 yamt /*
1266 1.25.2.2 yamt * Write transactions to disk + start I/O for contents
1267 1.25.2.2 yamt */
1268 1.25.2.2 yamt int
1269 1.25.2.2 yamt wapbl_flush(struct wapbl *wl, int waitfor)
1270 1.25.2.2 yamt {
1271 1.25.2.2 yamt struct buf *bp;
1272 1.25.2.2 yamt struct wapbl_entry *we;
1273 1.25.2.2 yamt off_t off;
1274 1.25.2.2 yamt off_t head;
1275 1.25.2.2 yamt off_t tail;
1276 1.25.2.2 yamt size_t delta = 0;
1277 1.25.2.2 yamt size_t flushsize;
1278 1.25.2.2 yamt size_t reserved;
1279 1.25.2.2 yamt int error = 0;
1280 1.25.2.2 yamt
1281 1.25.2.2 yamt /*
1282 1.25.2.2 yamt * Do a quick check to see if a full flush can be skipped
1283 1.25.2.2 yamt * This assumes that the flush callback does not need to be called
1284 1.25.2.2 yamt * unless there are other outstanding bufs.
1285 1.25.2.2 yamt */
1286 1.25.2.2 yamt if (!waitfor) {
1287 1.25.2.2 yamt size_t nbufs;
1288 1.25.2.2 yamt mutex_enter(&wl->wl_mtx); /* XXX need mutex here to
1289 1.25.2.2 yamt protect the KASSERTS */
1290 1.25.2.2 yamt nbufs = wl->wl_bufcount;
1291 1.25.2.2 yamt KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1292 1.25.2.2 yamt KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1293 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
1294 1.25.2.2 yamt if (nbufs == 0)
1295 1.25.2.2 yamt return 0;
1296 1.25.2.2 yamt }
1297 1.25.2.2 yamt
1298 1.25.2.2 yamt /*
1299 1.25.2.2 yamt * XXX we may consider using LK_UPGRADE here
1300 1.25.2.2 yamt * if we want to call flush from inside a transaction
1301 1.25.2.2 yamt */
1302 1.25.2.2 yamt rw_enter(&wl->wl_rwlock, RW_WRITER);
1303 1.25.2.2 yamt wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
1304 1.25.2.2 yamt wl->wl_dealloccnt);
1305 1.25.2.2 yamt
1306 1.25.2.2 yamt /*
1307 1.25.2.2 yamt * Now that we are fully locked and flushed,
1308 1.25.2.2 yamt * do another check for nothing to do.
1309 1.25.2.2 yamt */
1310 1.25.2.2 yamt if (wl->wl_bufcount == 0) {
1311 1.25.2.2 yamt goto out;
1312 1.25.2.2 yamt }
1313 1.25.2.2 yamt
1314 1.25.2.2 yamt #if 0
1315 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1316 1.25.2.2 yamt ("wapbl_flush thread %d.%d flushing entries with "
1317 1.25.2.2 yamt "bufcount=%zu bufbytes=%zu\n",
1318 1.25.2.2 yamt curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1319 1.25.2.2 yamt wl->wl_bufbytes));
1320 1.25.2.2 yamt #endif
1321 1.25.2.2 yamt
1322 1.25.2.2 yamt /* Calculate amount of space needed to flush */
1323 1.25.2.2 yamt flushsize = wapbl_transaction_len(wl);
1324 1.25.2.2 yamt
1325 1.25.2.2 yamt if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1326 1.25.2.2 yamt /*
1327 1.25.2.2 yamt * XXX this could be handled more gracefully, perhaps place
1328 1.25.2.2 yamt * only a partial transaction in the log and allow the
1329 1.25.2.2 yamt * remaining to flush without the protection of the journal.
1330 1.25.2.2 yamt */
1331 1.25.2.2 yamt panic("wapbl_flush: current transaction too big to flush\n");
1332 1.25.2.2 yamt }
1333 1.25.2.2 yamt
1334 1.25.2.2 yamt error = wapbl_truncate(wl, flushsize, 0);
1335 1.25.2.2 yamt if (error)
1336 1.25.2.2 yamt goto out2;
1337 1.25.2.2 yamt
1338 1.25.2.2 yamt off = wl->wl_head;
1339 1.25.2.2 yamt KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
1340 1.25.2.2 yamt (off < wl->wl_circ_off + wl->wl_circ_size)));
1341 1.25.2.2 yamt error = wapbl_write_blocks(wl, &off);
1342 1.25.2.2 yamt if (error)
1343 1.25.2.2 yamt goto out2;
1344 1.25.2.2 yamt error = wapbl_write_revocations(wl, &off);
1345 1.25.2.2 yamt if (error)
1346 1.25.2.2 yamt goto out2;
1347 1.25.2.2 yamt error = wapbl_write_inodes(wl, &off);
1348 1.25.2.2 yamt if (error)
1349 1.25.2.2 yamt goto out2;
1350 1.25.2.2 yamt
1351 1.25.2.2 yamt reserved = 0;
1352 1.25.2.2 yamt if (wl->wl_inohashcnt)
1353 1.25.2.2 yamt reserved = wapbl_transaction_inodes_len(wl);
1354 1.25.2.2 yamt
1355 1.25.2.2 yamt head = wl->wl_head;
1356 1.25.2.2 yamt tail = wl->wl_tail;
1357 1.25.2.2 yamt
1358 1.25.2.2 yamt wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1359 1.25.2.2 yamt &head, &tail);
1360 1.25.2.2 yamt #ifdef WAPBL_DEBUG
1361 1.25.2.2 yamt if (head != off) {
1362 1.25.2.2 yamt panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1363 1.25.2.2 yamt " off=%"PRIdMAX" flush=%zu\n",
1364 1.25.2.2 yamt (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1365 1.25.2.2 yamt flushsize);
1366 1.25.2.2 yamt }
1367 1.25.2.2 yamt #else
1368 1.25.2.2 yamt KASSERT(head == off);
1369 1.25.2.2 yamt #endif
1370 1.25.2.2 yamt
1371 1.25.2.2 yamt /* Opportunistically move the tail forward if we can */
1372 1.25.2.2 yamt if (!wapbl_lazy_truncate) {
1373 1.25.2.2 yamt mutex_enter(&wl->wl_mtx);
1374 1.25.2.2 yamt delta = wl->wl_reclaimable_bytes;
1375 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
1376 1.25.2.2 yamt wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1377 1.25.2.2 yamt &head, &tail);
1378 1.25.2.2 yamt }
1379 1.25.2.2 yamt
1380 1.25.2.2 yamt error = wapbl_write_commit(wl, head, tail);
1381 1.25.2.2 yamt if (error)
1382 1.25.2.2 yamt goto out2;
1383 1.25.2.2 yamt
1384 1.25.2.2 yamt we = wapbl_calloc(1, sizeof(*we));
1385 1.25.2.2 yamt
1386 1.25.2.2 yamt #ifdef WAPBL_DEBUG_BUFBYTES
1387 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1388 1.25.2.2 yamt ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1389 1.25.2.2 yamt " unsynced=%zu"
1390 1.25.2.2 yamt "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1391 1.25.2.2 yamt "inodes=%d\n",
1392 1.25.2.2 yamt curproc->p_pid, curlwp->l_lid, flushsize, delta,
1393 1.25.2.2 yamt wapbl_space_used(wl->wl_circ_size, head, tail),
1394 1.25.2.2 yamt wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1395 1.25.2.2 yamt wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1396 1.25.2.2 yamt wl->wl_inohashcnt));
1397 1.25.2.2 yamt #else
1398 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1399 1.25.2.2 yamt ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1400 1.25.2.2 yamt "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1401 1.25.2.2 yamt "inodes=%d\n",
1402 1.25.2.2 yamt curproc->p_pid, curlwp->l_lid, flushsize, delta,
1403 1.25.2.2 yamt wapbl_space_used(wl->wl_circ_size, head, tail),
1404 1.25.2.2 yamt wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1405 1.25.2.2 yamt wl->wl_dealloccnt, wl->wl_inohashcnt));
1406 1.25.2.2 yamt #endif
1407 1.25.2.2 yamt
1408 1.25.2.2 yamt
1409 1.25.2.2 yamt mutex_enter(&bufcache_lock);
1410 1.25.2.2 yamt mutex_enter(&wl->wl_mtx);
1411 1.25.2.2 yamt
1412 1.25.2.2 yamt wl->wl_reserved_bytes = reserved;
1413 1.25.2.2 yamt wl->wl_head = head;
1414 1.25.2.2 yamt wl->wl_tail = tail;
1415 1.25.2.2 yamt KASSERT(wl->wl_reclaimable_bytes >= delta);
1416 1.25.2.2 yamt wl->wl_reclaimable_bytes -= delta;
1417 1.25.2.2 yamt wl->wl_dealloccnt = 0;
1418 1.25.2.2 yamt #ifdef WAPBL_DEBUG_BUFBYTES
1419 1.25.2.2 yamt wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1420 1.25.2.2 yamt #endif
1421 1.25.2.2 yamt
1422 1.25.2.2 yamt we->we_wapbl = wl;
1423 1.25.2.2 yamt we->we_bufcount = wl->wl_bufcount;
1424 1.25.2.2 yamt #ifdef WAPBL_DEBUG_BUFBYTES
1425 1.25.2.2 yamt we->we_unsynced_bufbytes = wl->wl_bufbytes;
1426 1.25.2.2 yamt #endif
1427 1.25.2.2 yamt we->we_reclaimable_bytes = flushsize;
1428 1.25.2.2 yamt we->we_error = 0;
1429 1.25.2.2 yamt SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1430 1.25.2.2 yamt
1431 1.25.2.2 yamt /*
1432 1.25.2.2 yamt * this flushes bufs in reverse order than they were queued
1433 1.25.2.2 yamt * it shouldn't matter, but if we care we could use TAILQ instead.
1434 1.25.2.2 yamt * XXX Note they will get put on the lru queue when they flush
1435 1.25.2.2 yamt * so we might actually want to change this to preserve order.
1436 1.25.2.2 yamt */
1437 1.25.2.2 yamt while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
1438 1.25.2.2 yamt if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1439 1.25.2.2 yamt continue;
1440 1.25.2.2 yamt }
1441 1.25.2.2 yamt bp->b_iodone = wapbl_biodone;
1442 1.25.2.2 yamt bp->b_private = we;
1443 1.25.2.2 yamt bremfree(bp);
1444 1.25.2.2 yamt wapbl_remove_buf_locked(wl, bp);
1445 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
1446 1.25.2.2 yamt mutex_exit(&bufcache_lock);
1447 1.25.2.2 yamt bawrite(bp);
1448 1.25.2.2 yamt mutex_enter(&bufcache_lock);
1449 1.25.2.2 yamt mutex_enter(&wl->wl_mtx);
1450 1.25.2.2 yamt }
1451 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
1452 1.25.2.2 yamt mutex_exit(&bufcache_lock);
1453 1.25.2.2 yamt
1454 1.25.2.2 yamt #if 0
1455 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1456 1.25.2.2 yamt ("wapbl_flush thread %d.%d done flushing entries...\n",
1457 1.25.2.2 yamt curproc->p_pid, curlwp->l_lid));
1458 1.25.2.2 yamt #endif
1459 1.25.2.2 yamt
1460 1.25.2.2 yamt out:
1461 1.25.2.2 yamt
1462 1.25.2.2 yamt /*
1463 1.25.2.2 yamt * If the waitfor flag is set, don't return until everything is
1464 1.25.2.2 yamt * fully flushed and the on disk log is empty.
1465 1.25.2.2 yamt */
1466 1.25.2.2 yamt if (waitfor) {
1467 1.25.2.2 yamt error = wapbl_truncate(wl, wl->wl_circ_size -
1468 1.25.2.2 yamt wl->wl_reserved_bytes, wapbl_lazy_truncate);
1469 1.25.2.2 yamt }
1470 1.25.2.2 yamt
1471 1.25.2.2 yamt out2:
1472 1.25.2.2 yamt if (error) {
1473 1.25.2.2 yamt wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
1474 1.25.2.2 yamt wl->wl_dealloclens, wl->wl_dealloccnt);
1475 1.25.2.2 yamt }
1476 1.25.2.2 yamt
1477 1.25.2.2 yamt #ifdef WAPBL_DEBUG_PRINT
1478 1.25.2.2 yamt if (error) {
1479 1.25.2.2 yamt pid_t pid = -1;
1480 1.25.2.2 yamt lwpid_t lid = -1;
1481 1.25.2.2 yamt if (curproc)
1482 1.25.2.2 yamt pid = curproc->p_pid;
1483 1.25.2.2 yamt if (curlwp)
1484 1.25.2.2 yamt lid = curlwp->l_lid;
1485 1.25.2.2 yamt mutex_enter(&wl->wl_mtx);
1486 1.25.2.2 yamt #ifdef WAPBL_DEBUG_BUFBYTES
1487 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1488 1.25.2.2 yamt ("wapbl_flush: thread %d.%d aborted flush: "
1489 1.25.2.2 yamt "error = %d\n"
1490 1.25.2.2 yamt "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1491 1.25.2.2 yamt "deallocs=%d inodes=%d\n"
1492 1.25.2.2 yamt "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1493 1.25.2.2 yamt "unsynced=%zu\n",
1494 1.25.2.2 yamt pid, lid, error, wl->wl_bufcount,
1495 1.25.2.2 yamt wl->wl_bufbytes, wl->wl_bcount,
1496 1.25.2.2 yamt wl->wl_dealloccnt, wl->wl_inohashcnt,
1497 1.25.2.2 yamt wl->wl_error_count, wl->wl_reclaimable_bytes,
1498 1.25.2.2 yamt wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1499 1.25.2.2 yamt SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1500 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1501 1.25.2.2 yamt ("\tentry: bufcount = %zu, reclaimable = %zu, "
1502 1.25.2.2 yamt "error = %d, unsynced = %zu\n",
1503 1.25.2.2 yamt we->we_bufcount, we->we_reclaimable_bytes,
1504 1.25.2.2 yamt we->we_error, we->we_unsynced_bufbytes));
1505 1.25.2.2 yamt }
1506 1.25.2.2 yamt #else
1507 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1508 1.25.2.2 yamt ("wapbl_flush: thread %d.%d aborted flush: "
1509 1.25.2.2 yamt "error = %d\n"
1510 1.25.2.2 yamt "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1511 1.25.2.2 yamt "deallocs=%d inodes=%d\n"
1512 1.25.2.2 yamt "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
1513 1.25.2.2 yamt pid, lid, error, wl->wl_bufcount,
1514 1.25.2.2 yamt wl->wl_bufbytes, wl->wl_bcount,
1515 1.25.2.2 yamt wl->wl_dealloccnt, wl->wl_inohashcnt,
1516 1.25.2.2 yamt wl->wl_error_count, wl->wl_reclaimable_bytes,
1517 1.25.2.2 yamt wl->wl_reserved_bytes));
1518 1.25.2.2 yamt SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1519 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1520 1.25.2.2 yamt ("\tentry: bufcount = %zu, reclaimable = %zu, "
1521 1.25.2.2 yamt "error = %d\n", we->we_bufcount,
1522 1.25.2.2 yamt we->we_reclaimable_bytes, we->we_error));
1523 1.25.2.2 yamt }
1524 1.25.2.2 yamt #endif
1525 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
1526 1.25.2.2 yamt }
1527 1.25.2.2 yamt #endif
1528 1.25.2.2 yamt
1529 1.25.2.2 yamt rw_exit(&wl->wl_rwlock);
1530 1.25.2.2 yamt return error;
1531 1.25.2.2 yamt }
1532 1.25.2.2 yamt
1533 1.25.2.2 yamt /****************************************************************/
1534 1.25.2.2 yamt
/*
 * Assert that the journal rwlock is held (in either read or write
 * mode) by the current thread.  Debug aid only; compiles away with
 * KASSERT in non-DIAGNOSTIC kernels.
 */
void
wapbl_jlock_assert(struct wapbl *wl)
{

	KASSERT(rw_lock_held(&wl->wl_rwlock));
}
1541 1.25.2.2 yamt
/*
 * Assert that the journal rwlock is NOT held for writing by the
 * current thread.  Note this deliberately only checks the write
 * hold; a read hold may still be in place.
 */
void
wapbl_junlock_assert(struct wapbl *wl)
{

	KASSERT(!rw_write_held(&wl->wl_rwlock));
}
1548 1.25.2.2 yamt
1549 1.25.2.2 yamt /****************************************************************/
1550 1.25.2.2 yamt
1551 1.25.2.2 yamt /* locks missing */
1552 1.25.2.2 yamt void
1553 1.25.2.2 yamt wapbl_print(struct wapbl *wl,
1554 1.25.2.2 yamt int full,
1555 1.25.2.2 yamt void (*pr)(const char *, ...))
1556 1.25.2.2 yamt {
1557 1.25.2.2 yamt struct buf *bp;
1558 1.25.2.2 yamt struct wapbl_entry *we;
1559 1.25.2.2 yamt (*pr)("wapbl %p", wl);
1560 1.25.2.2 yamt (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
1561 1.25.2.2 yamt wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
1562 1.25.2.2 yamt (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
1563 1.25.2.2 yamt wl->wl_circ_size, wl->wl_circ_off,
1564 1.25.2.2 yamt (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
1565 1.25.2.2 yamt (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
1566 1.25.2.2 yamt wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
1567 1.25.2.2 yamt #ifdef WAPBL_DEBUG_BUFBYTES
1568 1.25.2.2 yamt (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1569 1.25.2.2 yamt "reserved = %zu errcnt = %d unsynced = %zu\n",
1570 1.25.2.2 yamt wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1571 1.25.2.2 yamt wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1572 1.25.2.2 yamt wl->wl_error_count, wl->wl_unsynced_bufbytes);
1573 1.25.2.2 yamt #else
1574 1.25.2.2 yamt (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1575 1.25.2.2 yamt "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
1576 1.25.2.2 yamt wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1577 1.25.2.2 yamt wl->wl_error_count);
1578 1.25.2.2 yamt #endif
1579 1.25.2.2 yamt (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
1580 1.25.2.2 yamt wl->wl_dealloccnt, wl->wl_dealloclim);
1581 1.25.2.2 yamt (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
1582 1.25.2.2 yamt wl->wl_inohashcnt, wl->wl_inohashmask);
1583 1.25.2.2 yamt (*pr)("entries:\n");
1584 1.25.2.2 yamt SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1585 1.25.2.2 yamt #ifdef WAPBL_DEBUG_BUFBYTES
1586 1.25.2.2 yamt (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
1587 1.25.2.2 yamt "unsynced = %zu\n",
1588 1.25.2.2 yamt we->we_bufcount, we->we_reclaimable_bytes,
1589 1.25.2.2 yamt we->we_error, we->we_unsynced_bufbytes);
1590 1.25.2.2 yamt #else
1591 1.25.2.2 yamt (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
1592 1.25.2.2 yamt we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
1593 1.25.2.2 yamt #endif
1594 1.25.2.2 yamt }
1595 1.25.2.2 yamt if (full) {
1596 1.25.2.2 yamt int cnt = 0;
1597 1.25.2.2 yamt (*pr)("bufs =");
1598 1.25.2.2 yamt LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
1599 1.25.2.2 yamt if (!LIST_NEXT(bp, b_wapbllist)) {
1600 1.25.2.2 yamt (*pr)(" %p", bp);
1601 1.25.2.2 yamt } else if ((++cnt % 6) == 0) {
1602 1.25.2.2 yamt (*pr)(" %p,\n\t", bp);
1603 1.25.2.2 yamt } else {
1604 1.25.2.2 yamt (*pr)(" %p,", bp);
1605 1.25.2.2 yamt }
1606 1.25.2.2 yamt }
1607 1.25.2.2 yamt (*pr)("\n");
1608 1.25.2.2 yamt
1609 1.25.2.2 yamt (*pr)("dealloced blks = ");
1610 1.25.2.2 yamt {
1611 1.25.2.2 yamt int i;
1612 1.25.2.2 yamt cnt = 0;
1613 1.25.2.2 yamt for (i = 0; i < wl->wl_dealloccnt; i++) {
1614 1.25.2.2 yamt (*pr)(" %"PRId64":%d,",
1615 1.25.2.2 yamt wl->wl_deallocblks[i],
1616 1.25.2.2 yamt wl->wl_dealloclens[i]);
1617 1.25.2.2 yamt if ((++cnt % 4) == 0) {
1618 1.25.2.2 yamt (*pr)("\n\t");
1619 1.25.2.2 yamt }
1620 1.25.2.2 yamt }
1621 1.25.2.2 yamt }
1622 1.25.2.2 yamt (*pr)("\n");
1623 1.25.2.2 yamt
1624 1.25.2.2 yamt (*pr)("registered inodes = ");
1625 1.25.2.2 yamt {
1626 1.25.2.2 yamt int i;
1627 1.25.2.2 yamt cnt = 0;
1628 1.25.2.2 yamt for (i = 0; i <= wl->wl_inohashmask; i++) {
1629 1.25.2.2 yamt struct wapbl_ino_head *wih;
1630 1.25.2.2 yamt struct wapbl_ino *wi;
1631 1.25.2.2 yamt
1632 1.25.2.2 yamt wih = &wl->wl_inohash[i];
1633 1.25.2.2 yamt LIST_FOREACH(wi, wih, wi_hash) {
1634 1.25.2.2 yamt if (wi->wi_ino == 0)
1635 1.25.2.2 yamt continue;
1636 1.25.2.2 yamt (*pr)(" %"PRId32"/0%06"PRIo32",",
1637 1.25.2.2 yamt wi->wi_ino, wi->wi_mode);
1638 1.25.2.2 yamt if ((++cnt % 4) == 0) {
1639 1.25.2.2 yamt (*pr)("\n\t");
1640 1.25.2.2 yamt }
1641 1.25.2.2 yamt }
1642 1.25.2.2 yamt }
1643 1.25.2.2 yamt (*pr)("\n");
1644 1.25.2.2 yamt }
1645 1.25.2.2 yamt }
1646 1.25.2.2 yamt }
1647 1.25.2.2 yamt
1648 1.25.2.2 yamt #if defined(WAPBL_DEBUG) || defined(DDB)
1649 1.25.2.2 yamt void
1650 1.25.2.2 yamt wapbl_dump(struct wapbl *wl)
1651 1.25.2.2 yamt {
1652 1.25.2.2 yamt #if defined(WAPBL_DEBUG)
1653 1.25.2.2 yamt if (!wl)
1654 1.25.2.2 yamt wl = wapbl_debug_wl;
1655 1.25.2.2 yamt #endif
1656 1.25.2.2 yamt if (!wl)
1657 1.25.2.2 yamt return;
1658 1.25.2.2 yamt wapbl_print(wl, 1, printf);
1659 1.25.2.2 yamt }
1660 1.25.2.2 yamt #endif
1661 1.25.2.2 yamt
1662 1.25.2.2 yamt /****************************************************************/
1663 1.25.2.2 yamt
1664 1.25.2.2 yamt void
1665 1.25.2.2 yamt wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
1666 1.25.2.2 yamt {
1667 1.25.2.2 yamt
1668 1.25.2.2 yamt wapbl_jlock_assert(wl);
1669 1.25.2.2 yamt
1670 1.25.2.2 yamt /* XXX should eventually instead tie this into resource estimation */
1671 1.25.2.2 yamt /* XXX this KASSERT needs locking/mutex analysis */
1672 1.25.2.2 yamt KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
1673 1.25.2.2 yamt wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
1674 1.25.2.2 yamt wl->wl_dealloclens[wl->wl_dealloccnt] = len;
1675 1.25.2.2 yamt wl->wl_dealloccnt++;
1676 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
1677 1.25.2.2 yamt ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
1678 1.25.2.2 yamt }
1679 1.25.2.2 yamt
1680 1.25.2.2 yamt /****************************************************************/
1681 1.25.2.2 yamt
1682 1.25.2.2 yamt static void
1683 1.25.2.2 yamt wapbl_inodetrk_init(struct wapbl *wl, u_int size)
1684 1.25.2.2 yamt {
1685 1.25.2.2 yamt
1686 1.25.2.2 yamt wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
1687 1.25.2.2 yamt if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
1688 1.25.2.2 yamt pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
1689 1.25.2.2 yamt "wapblinopl", &pool_allocator_nointr, IPL_NONE);
1690 1.25.2.2 yamt }
1691 1.25.2.2 yamt }
1692 1.25.2.2 yamt
1693 1.25.2.2 yamt static void
1694 1.25.2.2 yamt wapbl_inodetrk_free(struct wapbl *wl)
1695 1.25.2.2 yamt {
1696 1.25.2.2 yamt
1697 1.25.2.2 yamt /* XXX this KASSERT needs locking/mutex analysis */
1698 1.25.2.2 yamt KASSERT(wl->wl_inohashcnt == 0);
1699 1.25.2.2 yamt hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
1700 1.25.2.2 yamt if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
1701 1.25.2.2 yamt pool_destroy(&wapbl_ino_pool);
1702 1.25.2.2 yamt }
1703 1.25.2.2 yamt }
1704 1.25.2.2 yamt
1705 1.25.2.2 yamt static struct wapbl_ino *
1706 1.25.2.2 yamt wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
1707 1.25.2.2 yamt {
1708 1.25.2.2 yamt struct wapbl_ino_head *wih;
1709 1.25.2.2 yamt struct wapbl_ino *wi;
1710 1.25.2.2 yamt
1711 1.25.2.2 yamt KASSERT(mutex_owned(&wl->wl_mtx));
1712 1.25.2.2 yamt
1713 1.25.2.2 yamt wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1714 1.25.2.2 yamt LIST_FOREACH(wi, wih, wi_hash) {
1715 1.25.2.2 yamt if (ino == wi->wi_ino)
1716 1.25.2.2 yamt return wi;
1717 1.25.2.2 yamt }
1718 1.25.2.2 yamt return 0;
1719 1.25.2.2 yamt }
1720 1.25.2.2 yamt
1721 1.25.2.2 yamt void
1722 1.25.2.2 yamt wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
1723 1.25.2.2 yamt {
1724 1.25.2.2 yamt struct wapbl_ino_head *wih;
1725 1.25.2.2 yamt struct wapbl_ino *wi;
1726 1.25.2.2 yamt
1727 1.25.2.2 yamt wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
1728 1.25.2.2 yamt
1729 1.25.2.2 yamt mutex_enter(&wl->wl_mtx);
1730 1.25.2.2 yamt if (wapbl_inodetrk_get(wl, ino) == NULL) {
1731 1.25.2.2 yamt wi->wi_ino = ino;
1732 1.25.2.2 yamt wi->wi_mode = mode;
1733 1.25.2.2 yamt wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1734 1.25.2.2 yamt LIST_INSERT_HEAD(wih, wi, wi_hash);
1735 1.25.2.2 yamt wl->wl_inohashcnt++;
1736 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_INODE,
1737 1.25.2.2 yamt ("wapbl_register_inode: ino=%"PRId64"\n", ino));
1738 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
1739 1.25.2.2 yamt } else {
1740 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
1741 1.25.2.2 yamt pool_put(&wapbl_ino_pool, wi);
1742 1.25.2.2 yamt }
1743 1.25.2.2 yamt }
1744 1.25.2.2 yamt
1745 1.25.2.2 yamt void
1746 1.25.2.2 yamt wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
1747 1.25.2.2 yamt {
1748 1.25.2.2 yamt struct wapbl_ino *wi;
1749 1.25.2.2 yamt
1750 1.25.2.2 yamt mutex_enter(&wl->wl_mtx);
1751 1.25.2.2 yamt wi = wapbl_inodetrk_get(wl, ino);
1752 1.25.2.2 yamt if (wi) {
1753 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_INODE,
1754 1.25.2.2 yamt ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
1755 1.25.2.2 yamt KASSERT(wl->wl_inohashcnt > 0);
1756 1.25.2.2 yamt wl->wl_inohashcnt--;
1757 1.25.2.2 yamt LIST_REMOVE(wi, wi_hash);
1758 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
1759 1.25.2.2 yamt
1760 1.25.2.2 yamt pool_put(&wapbl_ino_pool, wi);
1761 1.25.2.2 yamt } else {
1762 1.25.2.2 yamt mutex_exit(&wl->wl_mtx);
1763 1.25.2.2 yamt }
1764 1.25.2.2 yamt }
1765 1.25.2.2 yamt
1766 1.25.2.2 yamt /****************************************************************/
1767 1.25.2.2 yamt
1768 1.25.2.2 yamt static __inline size_t
1769 1.25.2.2 yamt wapbl_transaction_inodes_len(struct wapbl *wl)
1770 1.25.2.2 yamt {
1771 1.25.2.2 yamt int blocklen = 1<<wl->wl_log_dev_bshift;
1772 1.25.2.2 yamt int iph;
1773 1.25.2.2 yamt
1774 1.25.2.2 yamt /* Calculate number of inodes described in a inodelist header */
1775 1.25.2.2 yamt iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
1776 1.25.2.2 yamt sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
1777 1.25.2.2 yamt
1778 1.25.2.2 yamt KASSERT(iph > 0);
1779 1.25.2.2 yamt
1780 1.25.2.2 yamt return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
1781 1.25.2.2 yamt }
1782 1.25.2.2 yamt
1783 1.25.2.2 yamt
1784 1.25.2.2 yamt /* Calculate amount of space a transaction will take on disk */
1785 1.25.2.2 yamt static size_t
1786 1.25.2.2 yamt wapbl_transaction_len(struct wapbl *wl)
1787 1.25.2.2 yamt {
1788 1.25.2.2 yamt int blocklen = 1<<wl->wl_log_dev_bshift;
1789 1.25.2.2 yamt size_t len;
1790 1.25.2.2 yamt int bph;
1791 1.25.2.2 yamt
1792 1.25.2.2 yamt /* Calculate number of blocks described in a blocklist header */
1793 1.25.2.2 yamt bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1794 1.25.2.2 yamt sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1795 1.25.2.2 yamt
1796 1.25.2.2 yamt KASSERT(bph > 0);
1797 1.25.2.2 yamt
1798 1.25.2.2 yamt len = wl->wl_bcount;
1799 1.25.2.2 yamt len += howmany(wl->wl_bufcount, bph)*blocklen;
1800 1.25.2.2 yamt len += howmany(wl->wl_dealloccnt, bph)*blocklen;
1801 1.25.2.2 yamt len += wapbl_transaction_inodes_len(wl);
1802 1.25.2.2 yamt
1803 1.25.2.2 yamt return len;
1804 1.25.2.2 yamt }
1805 1.25.2.2 yamt
/*
 * Perform commit operation
 *
 * Note that generation number incrementation needs to
 * be protected against racing with other invocations
 * of wapbl_commit. This is ok since this routine
 * is only invoked from wapbl_flush
 *
 * Writes a commit header (with the new head/tail and a timestamp)
 * into one of the two on-disk header slots, alternating on the
 * generation number so an interrupted write can never corrupt the
 * only valid header.  Disk caches are flushed before (so the data
 * the header points at is stable) and after (so the header itself
 * is stable) the write.
 */
static int
wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
{
	struct wapbl_wc_header *wc = wl->wl_wc_header;
	struct timespec ts;
	int error;
	int force = 1;

	/* XXX Calc checksum here, instead we do this for now */
	/* Flush journal data to stable storage before committing. */
	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		/* Cache-sync failure is logged but not treated as fatal. */
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	/* Fill in the commit record: new window and wall-clock stamp. */
	wc->wc_head = head;
	wc->wc_tail = tail;
	wc->wc_checksum = 0;
	wc->wc_version = 1;
	getnanotime(&ts);
	wc->wc_time = ts.tv_sec;
	wc->wc_timensec = ts.tv_nsec;

	WAPBL_PRINTF(WAPBL_PRINT_WRITE,
	    ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n",
	    (intmax_t)head, (intmax_t)tail));

	/*
	 * XXX if generation will rollover, then first zero
	 * over second commit header before trying to write both headers.
	 */

	/* Even generations use slot 0, odd generations slot 1. */
	error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
	    wl->wl_logpbn + wc->wc_generation % 2);
	if (error)
		return error;

	/* Make sure the commit header itself reached stable storage. */
	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	/*
	 * If the generation number was zero, write it out a second time.
	 * This handles initialization and generation number rollover
	 */
	if (wc->wc_generation++ == 0) {
		/* Recurse exactly once: generation is now 1, not 0. */
		error = wapbl_write_commit(wl, head, tail);
		/*
		 * This panic should be able to be removed if we do the
		 * zero'ing mentioned above, and we are certain to roll
		 * back generation number on failure.
		 */
		if (error)
			panic("wapbl_write_commit: error writing duplicate "
			    "log header: %d\n", error);
	}
	return 0;
}
1876 1.25.2.2 yamt
/* Returns new offset value */
/*
 * Write the bufs queued on wl_bufs into the circular log as a series
 * of WAPBL_WC_BLOCKS records: each record is one blocklist header
 * (describing up to bph bufs) followed by those bufs' data, padded
 * out to a whole log-device block.  *offp is advanced past all data
 * written.  Returns 0 or an error from wapbl_circ_write.
 */
static int
wapbl_write_blocks(struct wapbl *wl, off_t *offp)
{
	struct wapbl_wc_blocklist *wc =
	    (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int bph;
	struct buf *bp;
	off_t off = *offp;
	int error;
	size_t padding;

	KASSERT(rw_write_held(&wl->wl_rwlock));

	/* Block records that fit in one blocklist header block. */
	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	bp = LIST_FIRST(&wl->wl_bufs);

	while (bp) {
		int cnt;
		struct buf *obp = bp;	/* first buf covered by this header */

		KASSERT(bp->b_flags & B_LOCKED);

		wc->wc_type = WAPBL_WC_BLOCKS;
		wc->wc_len = blocklen;
		wc->wc_blkcount = 0;
		/* First pass: fill the header from up to bph bufs. */
		while (bp && (wc->wc_blkcount < bph)) {
			/*
			 * Make sure all the physical block numbers are up to
			 * date. If this is not always true on a given
			 * filesystem, then VOP_BMAP must be called. We
			 * could call VOP_BMAP here, or else in the filesystem
			 * specific flush callback, although neither of those
			 * solutions allow us to take the vnode lock. If a
			 * filesystem requires that we must take the vnode lock
			 * to call VOP_BMAP, then we can probably do it in
			 * bwrite when the vnode lock should already be held
			 * by the invoking code.
			 */
			KASSERT((bp->b_vp->v_type == VBLK) ||
			    (bp->b_blkno != bp->b_lblkno));
			KASSERT(bp->b_blkno > 0);

			wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
			wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
			wc->wc_len += bp->b_bcount;
			wc->wc_blkcount++;
			bp = LIST_NEXT(bp, b_wapbllist);
		}
		/* Round the record length up to a whole log block. */
		if (wc->wc_len % blocklen != 0) {
			padding = blocklen - wc->wc_len % blocklen;
			wc->wc_len += padding;
		} else {
			padding = 0;
		}

		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
		    ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
		    wc->wc_len, padding, (intmax_t)off));

		/* Write the header, then re-walk the same bufs for data. */
		error = wapbl_circ_write(wl, wc, blocklen, &off);
		if (error)
			return error;
		bp = obp;
		cnt = 0;
		/* Second pass: write the data of the bufs just described. */
		while (bp && (cnt++ < bph)) {
			error = wapbl_circ_write(wl, bp->b_data,
			    bp->b_bcount, &off);
			if (error)
				return error;
			bp = LIST_NEXT(bp, b_wapbllist);
		}
		if (padding) {
			/* Zero-fill up to the log block boundary. */
			void *zero;

			zero = wapbl_malloc(padding);
			memset(zero, 0, padding);
			error = wapbl_circ_write(wl, zero, padding, &off);
			wapbl_free(zero, padding);
			if (error)
				return error;
		}
	}
	*offp = off;
	return 0;
}
1966 1.25.2.2 yamt
1967 1.25.2.2 yamt static int
1968 1.25.2.2 yamt wapbl_write_revocations(struct wapbl *wl, off_t *offp)
1969 1.25.2.2 yamt {
1970 1.25.2.2 yamt struct wapbl_wc_blocklist *wc =
1971 1.25.2.2 yamt (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1972 1.25.2.2 yamt int i;
1973 1.25.2.2 yamt int blocklen = 1<<wl->wl_log_dev_bshift;
1974 1.25.2.2 yamt int bph;
1975 1.25.2.2 yamt off_t off = *offp;
1976 1.25.2.2 yamt int error;
1977 1.25.2.2 yamt
1978 1.25.2.2 yamt if (wl->wl_dealloccnt == 0)
1979 1.25.2.2 yamt return 0;
1980 1.25.2.2 yamt
1981 1.25.2.2 yamt bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1982 1.25.2.2 yamt sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1983 1.25.2.2 yamt
1984 1.25.2.2 yamt i = 0;
1985 1.25.2.2 yamt while (i < wl->wl_dealloccnt) {
1986 1.25.2.2 yamt wc->wc_type = WAPBL_WC_REVOCATIONS;
1987 1.25.2.2 yamt wc->wc_len = blocklen;
1988 1.25.2.2 yamt wc->wc_blkcount = 0;
1989 1.25.2.2 yamt while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
1990 1.25.2.2 yamt wc->wc_blocks[wc->wc_blkcount].wc_daddr =
1991 1.25.2.2 yamt wl->wl_deallocblks[i];
1992 1.25.2.2 yamt wc->wc_blocks[wc->wc_blkcount].wc_dlen =
1993 1.25.2.2 yamt wl->wl_dealloclens[i];
1994 1.25.2.2 yamt wc->wc_blkcount++;
1995 1.25.2.2 yamt i++;
1996 1.25.2.2 yamt }
1997 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1998 1.25.2.2 yamt ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
1999 1.25.2.2 yamt wc->wc_len, (intmax_t)off));
2000 1.25.2.2 yamt error = wapbl_circ_write(wl, wc, blocklen, &off);
2001 1.25.2.2 yamt if (error)
2002 1.25.2.2 yamt return error;
2003 1.25.2.2 yamt }
2004 1.25.2.2 yamt *offp = off;
2005 1.25.2.2 yamt return 0;
2006 1.25.2.2 yamt }
2007 1.25.2.2 yamt
/*
 * Write the registered inodes into the circular log as one or more
 * WAPBL_WC_INODES records.  The first record carries wc_clear so a
 * replay starts from an empty set.  Note that at least one (possibly
 * empty) record is always written, since the do-while runs once even
 * when wl_inohashcnt is zero.  *offp is advanced past all data.
 */
static int
wapbl_write_inodes(struct wapbl *wl, off_t *offp)
{
	struct wapbl_wc_inodelist *wc =
	    (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
	int i;
	int blocklen = 1 << wl->wl_log_dev_bshift;
	off_t off = *offp;
	int error;

	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;
	int iph;

	/* Inode records that fit in one inodelist header block. */
	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);

	/*
	 * Cursor over the hash table: i counts inodes emitted so far,
	 * wih is the next bucket to scan, wi the next entry within it.
	 */
	i = 0;
	wih = &wl->wl_inohash[0];
	wi = 0;
	do {
		wc->wc_type = WAPBL_WC_INODES;
		wc->wc_len = blocklen;
		wc->wc_inocnt = 0;
		wc->wc_clear = (i == 0);	/* only the first record clears */
		while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
			/* Skip empty buckets until an entry is found. */
			while (!wi) {
				KASSERT((wih - &wl->wl_inohash[0])
				    <= wl->wl_inohashmask);
				wi = LIST_FIRST(wih++);
			}
			wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
			wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
			wc->wc_inocnt++;
			i++;
			wi = LIST_NEXT(wi, wi_hash);
		}
		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
		    ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
		    wc->wc_len, (intmax_t)off));
		error = wapbl_circ_write(wl, wc, blocklen, &off);
		if (error)
			return error;
	} while (i < wl->wl_inohashcnt);

	*offp = off;
	return 0;
}
2056 1.25.2.2 yamt
2057 1.25.2.2 yamt #endif /* _KERNEL */
2058 1.25.2.2 yamt
2059 1.25.2.2 yamt /****************************************************************/
2060 1.25.2.2 yamt
/*
 * Replay-time record: the most recent journalled copy of a
 * filesystem block, kept in a hash keyed on the disk block number.
 */
struct wapbl_blk {
	LIST_ENTRY(wapbl_blk) wb_hash;	/* hash bucket linkage */
	daddr_t wb_blk;			/* disk block address (hash key) */
	off_t wb_off; /* Offset of this block in the log */
};
/* Minimum block hash size used by wapbl_blkhash_init. */
#define WAPBL_BLKPOOL_MIN 83
2067 1.25.2.2 yamt
2068 1.25.2.2 yamt static void
2069 1.25.2.2 yamt wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2070 1.25.2.2 yamt {
2071 1.25.2.2 yamt if (size < WAPBL_BLKPOOL_MIN)
2072 1.25.2.2 yamt size = WAPBL_BLKPOOL_MIN;
2073 1.25.2.2 yamt KASSERT(wr->wr_blkhash == 0);
2074 1.25.2.2 yamt #ifdef _KERNEL
2075 1.25.2.2 yamt wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2076 1.25.2.2 yamt #else /* ! _KERNEL */
2077 1.25.2.2 yamt /* Manually implement hashinit */
2078 1.25.2.2 yamt {
2079 1.25.2.2 yamt unsigned long i, hashsize;
2080 1.25.2.2 yamt for (hashsize = 1; hashsize < size; hashsize <<= 1)
2081 1.25.2.2 yamt continue;
2082 1.25.2.2 yamt wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
2083 1.25.2.2 yamt for (i = 0; i < wr->wr_blkhashmask; i++)
2084 1.25.2.2 yamt LIST_INIT(&wr->wr_blkhash[i]);
2085 1.25.2.2 yamt wr->wr_blkhashmask = hashsize - 1;
2086 1.25.2.2 yamt }
2087 1.25.2.2 yamt #endif /* ! _KERNEL */
2088 1.25.2.2 yamt }
2089 1.25.2.2 yamt
/*
 * Tear down the replay block hash.  All entries must already have
 * been removed (wr_blkhashcnt == 0).
 */
static void
wapbl_blkhash_free(struct wapbl_replay *wr)
{
	KASSERT(wr->wr_blkhashcnt == 0);
#ifdef _KERNEL
	hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
#else /* ! _KERNEL */
	/* Userland: release the array allocated in wapbl_blkhash_init. */
	wapbl_free(wr->wr_blkhash,
	    (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
#endif /* ! _KERNEL */
}
2101 1.25.2.2 yamt
2102 1.25.2.2 yamt static struct wapbl_blk *
2103 1.25.2.2 yamt wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2104 1.25.2.2 yamt {
2105 1.25.2.2 yamt struct wapbl_blk_head *wbh;
2106 1.25.2.2 yamt struct wapbl_blk *wb;
2107 1.25.2.2 yamt wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2108 1.25.2.2 yamt LIST_FOREACH(wb, wbh, wb_hash) {
2109 1.25.2.2 yamt if (blk == wb->wb_blk)
2110 1.25.2.2 yamt return wb;
2111 1.25.2.2 yamt }
2112 1.25.2.2 yamt return 0;
2113 1.25.2.2 yamt }
2114 1.25.2.2 yamt
2115 1.25.2.2 yamt static void
2116 1.25.2.2 yamt wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2117 1.25.2.2 yamt {
2118 1.25.2.2 yamt struct wapbl_blk_head *wbh;
2119 1.25.2.2 yamt struct wapbl_blk *wb;
2120 1.25.2.2 yamt wb = wapbl_blkhash_get(wr, blk);
2121 1.25.2.2 yamt if (wb) {
2122 1.25.2.2 yamt KASSERT(wb->wb_blk == blk);
2123 1.25.2.2 yamt wb->wb_off = off;
2124 1.25.2.2 yamt } else {
2125 1.25.2.2 yamt wb = wapbl_malloc(sizeof(*wb));
2126 1.25.2.2 yamt wb->wb_blk = blk;
2127 1.25.2.2 yamt wb->wb_off = off;
2128 1.25.2.2 yamt wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2129 1.25.2.2 yamt LIST_INSERT_HEAD(wbh, wb, wb_hash);
2130 1.25.2.2 yamt wr->wr_blkhashcnt++;
2131 1.25.2.2 yamt }
2132 1.25.2.2 yamt }
2133 1.25.2.2 yamt
2134 1.25.2.2 yamt static void
2135 1.25.2.2 yamt wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2136 1.25.2.2 yamt {
2137 1.25.2.2 yamt struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2138 1.25.2.2 yamt if (wb) {
2139 1.25.2.2 yamt KASSERT(wr->wr_blkhashcnt > 0);
2140 1.25.2.2 yamt wr->wr_blkhashcnt--;
2141 1.25.2.2 yamt LIST_REMOVE(wb, wb_hash);
2142 1.25.2.2 yamt wapbl_free(wb, sizeof(*wb));
2143 1.25.2.2 yamt }
2144 1.25.2.2 yamt }
2145 1.25.2.2 yamt
/*
 * Free every entry in the replay block hash, leaving the (empty)
 * bucket array itself in place.
 */
static void
wapbl_blkhash_clear(struct wapbl_replay *wr)
{
	unsigned long i;
	for (i = 0; i <= wr->wr_blkhashmask; i++) {
		struct wapbl_blk *wb;

		/* Pop entries until the bucket is empty. */
		while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
			KASSERT(wr->wr_blkhashcnt > 0);
			wr->wr_blkhashcnt--;
			LIST_REMOVE(wb, wb_hash);
			wapbl_free(wb, sizeof(*wb));
		}
	}
	KASSERT(wr->wr_blkhashcnt == 0);
}
2162 1.25.2.2 yamt
2163 1.25.2.2 yamt /****************************************************************/
2164 1.25.2.2 yamt
/*
 * Read 'len' bytes from the circular log area at *offp, wrapping
 * around the end of the circle if necessary (at most one wrap, so
 * at most two device reads).  len must be a multiple of the log
 * device block size.  *offp is advanced past the data read.
 */
static int
wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;

	/* len must be an exact multiple of the log device block size. */
	KASSERT(((len >> wr->wr_log_dev_bshift) <<
	    wr->wr_log_dev_bshift) == len);
	/* Clamp into the circular region (offsets below it wrap in). */
	if (off < wr->wr_circ_off)
		off = wr->wr_circ_off;
	slen = wr->wr_circ_off + wr->wr_circ_size - off;
	if (slen < len) {
		/* Read wraps: tail segment first, then restart at circ_off. */
		error = wapbl_read(data, slen, wr->wr_devvp,
		    wr->wr_logpbn + (off >> wr->wr_log_dev_bshift));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wr->wr_circ_off;
	}
	error = wapbl_read(data, len, wr->wr_devvp,
	    wr->wr_logpbn + (off >> wr->wr_log_dev_bshift));
	if (error)
		return error;
	off += len;
	if (off >= wr->wr_circ_off + wr->wr_circ_size)
		off = wr->wr_circ_off;
	*offp = off;
	return 0;
}
2196 1.25.2.2 yamt
2197 1.25.2.2 yamt static void
2198 1.25.2.2 yamt wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2199 1.25.2.2 yamt {
2200 1.25.2.2 yamt size_t slen;
2201 1.25.2.2 yamt off_t off = *offp;
2202 1.25.2.2 yamt
2203 1.25.2.2 yamt KASSERT(((len >> wr->wr_log_dev_bshift) <<
2204 1.25.2.2 yamt wr->wr_log_dev_bshift) == len);
2205 1.25.2.2 yamt
2206 1.25.2.2 yamt if (off < wr->wr_circ_off)
2207 1.25.2.2 yamt off = wr->wr_circ_off;
2208 1.25.2.2 yamt slen = wr->wr_circ_off + wr->wr_circ_size - off;
2209 1.25.2.2 yamt if (slen < len) {
2210 1.25.2.2 yamt len -= slen;
2211 1.25.2.2 yamt off = wr->wr_circ_off;
2212 1.25.2.2 yamt }
2213 1.25.2.2 yamt off += len;
2214 1.25.2.2 yamt if (off >= wr->wr_circ_off + wr->wr_circ_size)
2215 1.25.2.2 yamt off = wr->wr_circ_off;
2216 1.25.2.2 yamt *offp = off;
2217 1.25.2.2 yamt }
2218 1.25.2.2 yamt
2219 1.25.2.2 yamt /****************************************************************/
2220 1.25.2.2 yamt
2221 1.25.2.2 yamt int
2222 1.25.2.2 yamt wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2223 1.25.2.2 yamt daddr_t off, size_t count, size_t blksize)
2224 1.25.2.2 yamt {
2225 1.25.2.2 yamt struct wapbl_replay *wr;
2226 1.25.2.2 yamt int error;
2227 1.25.2.2 yamt struct vnode *devvp;
2228 1.25.2.2 yamt daddr_t logpbn;
2229 1.25.2.2 yamt uint8_t *scratch;
2230 1.25.2.2 yamt struct wapbl_wc_header *wch;
2231 1.25.2.2 yamt struct wapbl_wc_header *wch2;
2232 1.25.2.2 yamt /* Use this until we read the actual log header */
2233 1.25.2.2 yamt int log_dev_bshift = DEV_BSHIFT;
2234 1.25.2.2 yamt size_t used;
2235 1.25.2.2 yamt
2236 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2237 1.25.2.2 yamt ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2238 1.25.2.2 yamt vp, off, count, blksize));
2239 1.25.2.2 yamt
2240 1.25.2.2 yamt if (off < 0)
2241 1.25.2.2 yamt return EINVAL;
2242 1.25.2.2 yamt
2243 1.25.2.2 yamt if (blksize < DEV_BSIZE)
2244 1.25.2.2 yamt return EINVAL;
2245 1.25.2.2 yamt if (blksize % DEV_BSIZE)
2246 1.25.2.2 yamt return EINVAL;
2247 1.25.2.2 yamt
2248 1.25.2.2 yamt #ifdef _KERNEL
2249 1.25.2.2 yamt #if 0
2250 1.25.2.2 yamt /* XXX vp->v_size isn't reliably set for VBLK devices,
2251 1.25.2.2 yamt * especially root. However, we might still want to verify
2252 1.25.2.2 yamt * that the full load is readable */
2253 1.25.2.2 yamt if ((off + count) * blksize > vp->v_size)
2254 1.25.2.2 yamt return EINVAL;
2255 1.25.2.2 yamt #endif
2256 1.25.2.2 yamt
2257 1.25.2.2 yamt if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2258 1.25.2.2 yamt return error;
2259 1.25.2.2 yamt }
2260 1.25.2.2 yamt #else /* ! _KERNEL */
2261 1.25.2.2 yamt devvp = vp;
2262 1.25.2.2 yamt logpbn = off;
2263 1.25.2.2 yamt #endif /* ! _KERNEL */
2264 1.25.2.2 yamt
2265 1.25.2.2 yamt scratch = wapbl_malloc(MAXBSIZE);
2266 1.25.2.2 yamt
2267 1.25.2.2 yamt error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
2268 1.25.2.2 yamt if (error)
2269 1.25.2.2 yamt goto errout;
2270 1.25.2.2 yamt
2271 1.25.2.2 yamt wch = (struct wapbl_wc_header *)scratch;
2272 1.25.2.2 yamt wch2 =
2273 1.25.2.2 yamt (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2274 1.25.2.2 yamt /* XXX verify checksums and magic numbers */
2275 1.25.2.2 yamt if (wch->wc_type != WAPBL_WC_HEADER) {
2276 1.25.2.2 yamt printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2277 1.25.2.2 yamt error = EFTYPE;
2278 1.25.2.2 yamt goto errout;
2279 1.25.2.2 yamt }
2280 1.25.2.2 yamt
2281 1.25.2.2 yamt if (wch2->wc_generation > wch->wc_generation)
2282 1.25.2.2 yamt wch = wch2;
2283 1.25.2.2 yamt
2284 1.25.2.2 yamt wr = wapbl_calloc(1, sizeof(*wr));
2285 1.25.2.2 yamt
2286 1.25.2.2 yamt wr->wr_logvp = vp;
2287 1.25.2.2 yamt wr->wr_devvp = devvp;
2288 1.25.2.2 yamt wr->wr_logpbn = logpbn;
2289 1.25.2.2 yamt
2290 1.25.2.2 yamt wr->wr_scratch = scratch;
2291 1.25.2.2 yamt
2292 1.25.2.2 yamt wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
2293 1.25.2.2 yamt wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
2294 1.25.2.2 yamt wr->wr_circ_off = wch->wc_circ_off;
2295 1.25.2.2 yamt wr->wr_circ_size = wch->wc_circ_size;
2296 1.25.2.2 yamt wr->wr_generation = wch->wc_generation;
2297 1.25.2.2 yamt
2298 1.25.2.2 yamt used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2299 1.25.2.2 yamt
2300 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2301 1.25.2.2 yamt ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2302 1.25.2.2 yamt " len=%"PRId64" used=%zu\n",
2303 1.25.2.2 yamt wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2304 1.25.2.2 yamt wch->wc_circ_size, used));
2305 1.25.2.2 yamt
2306 1.25.2.2 yamt wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2307 1.25.2.2 yamt
2308 1.25.2.2 yamt error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
2309 1.25.2.2 yamt if (error) {
2310 1.25.2.2 yamt wapbl_replay_stop(wr);
2311 1.25.2.2 yamt wapbl_replay_free(wr);
2312 1.25.2.2 yamt return error;
2313 1.25.2.2 yamt }
2314 1.25.2.2 yamt
2315 1.25.2.2 yamt *wrp = wr;
2316 1.25.2.2 yamt return 0;
2317 1.25.2.2 yamt
2318 1.25.2.2 yamt errout:
2319 1.25.2.2 yamt wapbl_free(scratch, MAXBSIZE);
2320 1.25.2.2 yamt return error;
2321 1.25.2.2 yamt }
2322 1.25.2.2 yamt
2323 1.25.2.2 yamt void
2324 1.25.2.2 yamt wapbl_replay_stop(struct wapbl_replay *wr)
2325 1.25.2.2 yamt {
2326 1.25.2.2 yamt
2327 1.25.2.2 yamt if (!wapbl_replay_isopen(wr))
2328 1.25.2.2 yamt return;
2329 1.25.2.2 yamt
2330 1.25.2.2 yamt WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2331 1.25.2.2 yamt
2332 1.25.2.2 yamt wapbl_free(wr->wr_scratch, MAXBSIZE);
2333 1.25.2.2 yamt wr->wr_scratch = NULL;
2334 1.25.2.2 yamt
2335 1.25.2.2 yamt wr->wr_logvp = NULL;
2336 1.25.2.2 yamt
2337 1.25.2.2 yamt wapbl_blkhash_clear(wr);
2338 1.25.2.2 yamt wapbl_blkhash_free(wr);
2339 1.25.2.2 yamt }
2340 1.25.2.2 yamt
2341 1.25.2.2 yamt void
2342 1.25.2.2 yamt wapbl_replay_free(struct wapbl_replay *wr)
2343 1.25.2.2 yamt {
2344 1.25.2.2 yamt
2345 1.25.2.2 yamt KDASSERT(!wapbl_replay_isopen(wr));
2346 1.25.2.2 yamt
2347 1.25.2.2 yamt if (wr->wr_inodes)
2348 1.25.2.2 yamt wapbl_free(wr->wr_inodes,
2349 1.25.2.2 yamt wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
2350 1.25.2.2 yamt wapbl_free(wr, sizeof(*wr));
2351 1.25.2.2 yamt }
2352 1.25.2.2 yamt
#ifdef _KERNEL
/*
 * wapbl_replay_isopen1: out-of-line, kernel-only wrapper around the
 * wapbl_replay_isopen predicate (defined elsewhere in this file),
 * presumably so external code can test replay state without access to
 * the inline/macro form — verify against the header that declares it.
 */
int
wapbl_replay_isopen1(struct wapbl_replay *wr)
{

	return wapbl_replay_isopen(wr);
}
#endif
2361 1.25.2.2 yamt
2362 1.25.2.2 yamt static void
2363 1.25.2.2 yamt wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
2364 1.25.2.2 yamt {
2365 1.25.2.2 yamt struct wapbl_wc_blocklist *wc =
2366 1.25.2.2 yamt (struct wapbl_wc_blocklist *)wr->wr_scratch;
2367 1.25.2.2 yamt int fsblklen = 1 << wr->wr_fs_dev_bshift;
2368 1.25.2.2 yamt int i, j, n;
2369 1.25.2.2 yamt
2370 1.25.2.2 yamt for (i = 0; i < wc->wc_blkcount; i++) {
2371 1.25.2.2 yamt /*
2372 1.25.2.2 yamt * Enter each physical block into the hashtable independently.
2373 1.25.2.2 yamt */
2374 1.25.2.2 yamt n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2375 1.25.2.2 yamt for (j = 0; j < n; j++) {
2376 1.25.2.2 yamt wapbl_blkhash_ins(wr, wc->wc_blocks[i].wc_daddr + j,
2377 1.25.2.2 yamt *offp);
2378 1.25.2.2 yamt wapbl_circ_advance(wr, fsblklen, offp);
2379 1.25.2.2 yamt }
2380 1.25.2.2 yamt }
2381 1.25.2.2 yamt }
2382 1.25.2.2 yamt
2383 1.25.2.2 yamt static void
2384 1.25.2.2 yamt wapbl_replay_process_revocations(struct wapbl_replay *wr)
2385 1.25.2.2 yamt {
2386 1.25.2.2 yamt struct wapbl_wc_blocklist *wc =
2387 1.25.2.2 yamt (struct wapbl_wc_blocklist *)wr->wr_scratch;
2388 1.25.2.2 yamt int i, j, n;
2389 1.25.2.2 yamt
2390 1.25.2.2 yamt for (i = 0; i < wc->wc_blkcount; i++) {
2391 1.25.2.2 yamt /*
2392 1.25.2.2 yamt * Remove any blocks found from the hashtable.
2393 1.25.2.2 yamt */
2394 1.25.2.2 yamt n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2395 1.25.2.2 yamt for (j = 0; j < n; j++)
2396 1.25.2.2 yamt wapbl_blkhash_rem(wr, wc->wc_blocks[i].wc_daddr + j);
2397 1.25.2.2 yamt }
2398 1.25.2.2 yamt }
2399 1.25.2.2 yamt
2400 1.25.2.2 yamt static void
2401 1.25.2.2 yamt wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
2402 1.25.2.2 yamt {
2403 1.25.2.2 yamt struct wapbl_wc_inodelist *wc =
2404 1.25.2.2 yamt (struct wapbl_wc_inodelist *)wr->wr_scratch;
2405 1.25.2.2 yamt void *new_inodes;
2406 1.25.2.2 yamt const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
2407 1.25.2.2 yamt
2408 1.25.2.2 yamt KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
2409 1.25.2.2 yamt
2410 1.25.2.2 yamt /*
2411 1.25.2.2 yamt * Keep track of where we found this so location won't be
2412 1.25.2.2 yamt * overwritten.
2413 1.25.2.2 yamt */
2414 1.25.2.2 yamt if (wc->wc_clear) {
2415 1.25.2.2 yamt wr->wr_inodestail = oldoff;
2416 1.25.2.2 yamt wr->wr_inodescnt = 0;
2417 1.25.2.2 yamt if (wr->wr_inodes != NULL) {
2418 1.25.2.2 yamt wapbl_free(wr->wr_inodes, oldsize);
2419 1.25.2.2 yamt wr->wr_inodes = NULL;
2420 1.25.2.2 yamt }
2421 1.25.2.2 yamt }
2422 1.25.2.2 yamt wr->wr_inodeshead = newoff;
2423 1.25.2.2 yamt if (wc->wc_inocnt == 0)
2424 1.25.2.2 yamt return;
2425 1.25.2.2 yamt
2426 1.25.2.2 yamt new_inodes = wapbl_malloc((wr->wr_inodescnt + wc->wc_inocnt) *
2427 1.25.2.2 yamt sizeof(wr->wr_inodes[0]));
2428 1.25.2.2 yamt if (wr->wr_inodes != NULL) {
2429 1.25.2.2 yamt memcpy(new_inodes, wr->wr_inodes, oldsize);
2430 1.25.2.2 yamt wapbl_free(wr->wr_inodes, oldsize);
2431 1.25.2.2 yamt }
2432 1.25.2.2 yamt wr->wr_inodes = new_inodes;
2433 1.25.2.2 yamt memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
2434 1.25.2.2 yamt wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
2435 1.25.2.2 yamt wr->wr_inodescnt += wc->wc_inocnt;
2436 1.25.2.2 yamt }
2437 1.25.2.2 yamt
/*
 * wapbl_replay_process: walk the circular log from "tail" toward
 * "head", dispatching each record to the per-type processing routine
 * and populating the replay state (block hashtable, inode list).
 * Returns 0 on success or EFTYPE on malformed records; on any error the
 * partially-built hashtable is cleared before returning.
 */
static int
wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
{
	off_t off;
	int error;

	int logblklen = 1 << wr->wr_log_dev_bshift;

	wapbl_blkhash_clear(wr);

	off = tail;
	while (off != head) {
		struct wapbl_wc_null *wcn;
		/* Remember the start of this record for the length check. */
		off_t saveoff = off;
		error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
		if (error)
			goto errout;
		wcn = (struct wapbl_wc_null *)wr->wr_scratch;
		switch (wcn->wc_type) {
		case WAPBL_WC_BLOCKS:
			/* May advance "off" past the record's block data. */
			wapbl_replay_process_blocks(wr, &off);
			break;

		case WAPBL_WC_REVOCATIONS:
			wapbl_replay_process_revocations(wr);
			break;

		case WAPBL_WC_INODES:
			wapbl_replay_process_inodes(wr, saveoff, off);
			break;

		default:
			printf("Unrecognized wapbl type: 0x%08x\n",
			    wcn->wc_type);
			error = EFTYPE;
			goto errout;
		}
		/*
		 * Cross-check: advancing the record start by the record's
		 * declared length must land exactly where the processing
		 * above left "off"; a mismatch means the log's length
		 * fields are inconsistent with its contents.
		 */
		wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
		if (off != saveoff) {
			printf("wapbl_replay: corrupted records\n");
			error = EFTYPE;
			goto errout;
		}
	}
	return 0;

 errout:
	wapbl_blkhash_clear(wr);
	return error;
}
2488 1.25.2.2 yamt
/*
 * wapbl_replay_verify: disabled (never compiled) sanity checker that
 * would re-walk the log and compare each journalled block against the
 * filesystem device, counting mismatches.  NOTE(review): as written it
 * references "wch", which is not defined in this scope — it would not
 * compile if the #if 0 were removed without rework.
 */
#if 0
int
wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
{
	off_t off;
	int mismatchcnt = 0;
	int logblklen = 1 << wr->wr_log_dev_bshift;
	int fsblklen = 1 << wr->wr_fs_dev_bshift;
	void *scratch1 = wapbl_malloc(MAXBSIZE);
	void *scratch2 = wapbl_malloc(MAXBSIZE);
	int error = 0;

	KDASSERT(wapbl_replay_isopen(wr));

	off = wch->wc_tail;
	while (off != wch->wc_head) {
		struct wapbl_wc_null *wcn;
#ifdef DEBUG
		off_t saveoff = off;
#endif
		error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
		if (error)
			goto out;
		wcn = (struct wapbl_wc_null *)wr->wr_scratch;
		switch (wcn->wc_type) {
		case WAPBL_WC_BLOCKS:
			{
				struct wapbl_wc_blocklist *wc =
				    (struct wapbl_wc_blocklist *)wr->wr_scratch;
				int i;
				for (i = 0; i < wc->wc_blkcount; i++) {
					int foundcnt = 0;
					int dirtycnt = 0;
					int j, n;
					/*
					 * Check each physical block into the
					 * hashtable independently
					 */
					n = wc->wc_blocks[i].wc_dlen >>
					    wch->wc_fs_dev_bshift;
					for (j = 0; j < n; j++) {
						struct wapbl_blk *wb =
						    wapbl_blkhash_get(wr,
						    wc->wc_blocks[i].wc_daddr + j);
						if (wb && (wb->wb_off == off)) {
							foundcnt++;
							error =
							    wapbl_circ_read(wr,
							    scratch1, fsblklen,
							    &off);
							if (error)
								goto out;
							error =
							    wapbl_read(scratch2,
							    fsblklen, fsdevvp,
							    wb->wb_blk);
							if (error)
								goto out;
							if (memcmp(scratch1,
							    scratch2,
							    fsblklen)) {
								printf(
		"wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
								    wb->wb_blk, (intmax_t)off);
								dirtycnt++;
								mismatchcnt++;
							}
						} else {
							wapbl_circ_advance(wr,
							    fsblklen, &off);
						}
					}
#if 0
					/*
					 * If all of the blocks in an entry
					 * are clean, then remove all of its
					 * blocks from the hashtable since they
					 * never will need replay.
					 */
					if ((foundcnt != 0) &&
					    (dirtycnt == 0)) {
						off = saveoff;
						wapbl_circ_advance(wr,
						    logblklen, &off);
						for (j = 0; j < n; j++) {
							struct wapbl_blk *wb =
							    wapbl_blkhash_get(wr,
							    wc->wc_blocks[i].wc_daddr + j);
							if (wb &&
							    (wb->wb_off == off)) {
								wapbl_blkhash_rem(wr, wb->wb_blk);
							}
							wapbl_circ_advance(wr,
							    fsblklen, &off);
						}
					}
#endif
				}
			}
			break;
		case WAPBL_WC_REVOCATIONS:
		case WAPBL_WC_INODES:
			break;
		default:
			KASSERT(0);
		}
#ifdef DEBUG
		/* Same record-length cross-check as wapbl_replay_process. */
		wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
		KASSERT(off == saveoff);
#endif
	}
 out:
	wapbl_free(scratch1, MAXBSIZE);
	wapbl_free(scratch2, MAXBSIZE);
	if (!error && mismatchcnt)
		error = EFTYPE;
	return error;
}
#endif
2608 1.25.2.2 yamt
2609 1.25.2.2 yamt int
2610 1.25.2.2 yamt wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2611 1.25.2.2 yamt {
2612 1.25.2.2 yamt struct wapbl_blk *wb;
2613 1.25.2.2 yamt size_t i;
2614 1.25.2.2 yamt off_t off;
2615 1.25.2.2 yamt void *scratch;
2616 1.25.2.2 yamt int error = 0;
2617 1.25.2.2 yamt int fsblklen = 1 << wr->wr_fs_dev_bshift;
2618 1.25.2.2 yamt
2619 1.25.2.2 yamt KDASSERT(wapbl_replay_isopen(wr));
2620 1.25.2.2 yamt
2621 1.25.2.2 yamt scratch = wapbl_malloc(MAXBSIZE);
2622 1.25.2.2 yamt
2623 1.25.2.2 yamt for (i = 0; i < wr->wr_blkhashmask; ++i) {
2624 1.25.2.2 yamt LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
2625 1.25.2.2 yamt off = wb->wb_off;
2626 1.25.2.2 yamt error = wapbl_circ_read(wr, scratch, fsblklen, &off);
2627 1.25.2.2 yamt if (error)
2628 1.25.2.2 yamt break;
2629 1.25.2.2 yamt error = wapbl_write(scratch, fsblklen, fsdevvp,
2630 1.25.2.2 yamt wb->wb_blk);
2631 1.25.2.2 yamt if (error)
2632 1.25.2.2 yamt break;
2633 1.25.2.2 yamt }
2634 1.25.2.2 yamt }
2635 1.25.2.2 yamt
2636 1.25.2.2 yamt wapbl_free(scratch, MAXBSIZE);
2637 1.25.2.2 yamt return error;
2638 1.25.2.2 yamt }
2639 1.25.2.2 yamt
2640 1.25.2.2 yamt int
2641 1.25.2.2 yamt wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
2642 1.25.2.2 yamt {
2643 1.25.2.2 yamt int fsblklen = 1 << wr->wr_fs_dev_bshift;
2644 1.25.2.2 yamt
2645 1.25.2.2 yamt KDASSERT(wapbl_replay_isopen(wr));
2646 1.25.2.2 yamt KASSERT((len % fsblklen) == 0);
2647 1.25.2.2 yamt
2648 1.25.2.2 yamt while (len != 0) {
2649 1.25.2.2 yamt struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2650 1.25.2.2 yamt if (wb)
2651 1.25.2.2 yamt return 1;
2652 1.25.2.2 yamt len -= fsblklen;
2653 1.25.2.2 yamt }
2654 1.25.2.2 yamt return 0;
2655 1.25.2.2 yamt }
2656 1.25.2.2 yamt
2657 1.25.2.2 yamt int
2658 1.25.2.2 yamt wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2659 1.25.2.2 yamt {
2660 1.25.2.2 yamt int fsblklen = 1 << wr->wr_fs_dev_bshift;
2661 1.25.2.2 yamt
2662 1.25.2.2 yamt KDASSERT(wapbl_replay_isopen(wr));
2663 1.25.2.2 yamt
2664 1.25.2.2 yamt KASSERT((len % fsblklen) == 0);
2665 1.25.2.2 yamt
2666 1.25.2.2 yamt while (len != 0) {
2667 1.25.2.2 yamt struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2668 1.25.2.2 yamt if (wb) {
2669 1.25.2.2 yamt off_t off = wb->wb_off;
2670 1.25.2.2 yamt int error;
2671 1.25.2.2 yamt error = wapbl_circ_read(wr, data, fsblklen, &off);
2672 1.25.2.2 yamt if (error)
2673 1.25.2.2 yamt return error;
2674 1.25.2.2 yamt }
2675 1.25.2.2 yamt data = (uint8_t *)data + fsblklen;
2676 1.25.2.2 yamt len -= fsblklen;
2677 1.25.2.2 yamt blk++;
2678 1.25.2.2 yamt }
2679 1.25.2.2 yamt return 0;
2680 1.25.2.2 yamt }
2681