/*	$NetBSD: vfs_wapbl.c,v 1.1.4.1 2008/10/19 22:17:29 haad Exp $	*/

/*-
 * Copyright (c) 2003,2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements file system independent write ahead filesystem logging.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.1.4.1 2008/10/19 22:17:29 haad Exp $");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/wapbl.h>

#if WAPBL_UVM_ALLOC
#include <uvm/uvm.h>
#endif

#include <miscfs/specfs/specdev.h>

MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
#define	wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
#define	wapbl_free(a) free((a), M_WAPBL)
#define	wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO)

#else /* !_KERNEL */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <sys/time.h>
#include <sys/wapbl.h>

#define	KDASSERT(x) assert(x)
#define	KASSERT(x) assert(x)
#define	wapbl_malloc(s) malloc(s)
#define	wapbl_free(a) free(a)
#define	wapbl_calloc(n, s) calloc((n), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
struct wapbl {
	struct vnode *wl_logvp;	/* r: log here */
	struct vnode *wl_devvp;	/* r: log on this device */
	struct mount *wl_mount;	/* r: mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r: Physical block number of start of log */
	int wl_log_dev_bshift;	/* r: logarithm of device block size of log
				      device */
	int wl_fs_dev_bshift;	/* r: logarithm of device block size of
				      filesystem device */

	unsigned wl_lock_count;	/* m: Count of transactions in progress */

	size_t wl_circ_size;	/* r: Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r: Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r: Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r: Number of buf bytes reserved for log */

	off_t wl_head;		/* l: Byte offset of log head */
	off_t wl_tail;		/* l: Byte offset of log tail */
	/*
	 * head == tail == 0 means log is empty
	 * head == tail != 0 means log is full
	 * see assertions in wapbl_advance() for other boundary conditions.
	 * only truncate moves the tail, except when flush sets it to
	 * wl_header_size; only flush moves the head, except when truncate
	 * sets it to 0.
	 */
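	/*
	 * Illustrative example (hypothetical numbers): with
	 * wl_circ_off = 1024 and wl_circ_size = 8192, an empty log has
	 * head == tail == 0.  A first flush of 2048 bytes leaves
	 * head == 3072 and tail == 1024 (see wapbl_advance_head), and a
	 * truncate that reclaims everything resets both back to 0.
	 */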

	struct wapbl_wc_header *wl_wc_header;	/* l */
	void *wl_wc_scratch;	/* l: scratch space (XXX: why?!?) */

	kmutex_t wl_mtx;	/* u: short-term lock */
	krwlock_t wl_rwlock;	/* u: File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
	wapbl_flush_fn_t wl_flush;	/* r */
	wapbl_flush_fn_t wl_flush_abort;/* r */

	size_t wl_bufbytes;	/* m: Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m: Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m: Total bcount of wl_bufs */

	LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes; /* m: Amount of space available for
					reclamation by truncate */
	int wl_error_count;	/* m: # of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
#endif

	daddr_t *wl_deallocblks;/* l: address of block */
	int *wl_dealloclens;	/* l: size of block (fragments, remember) */
	int wl_dealloccnt;	/* l: total count */
	int wl_dealloclim;	/* l: max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
						   accounting */
};

#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
#endif /* _KERNEL */

static int wapbl_replay_prescan(struct wapbl_replay *wr);
static int wapbl_replay_get_inodes(struct wapbl_replay *wr);

static __inline size_t wapbl_space_free(size_t avail, off_t head,
	off_t tail);
static __inline size_t wapbl_space_used(size_t avail, off_t head,
	off_t tail);

#ifdef _KERNEL

#define	WAPBL_INODETRK_SIZE 83
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

/*
 * This is useful for debugging.  If set, the log will
 * only be truncated when necessary.
 */
int wapbl_lazy_truncate = 0;

struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};

void
wapbl_init()
{

	malloc_type_attach(M_WAPBL);
}

int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
	wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = DEV_BSHIFT;
	int fs_dev_bshift = DEV_BSHIFT;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
	    " count=%zu blksize=%zu\n", vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
		    ("wapbl: log device's block size cannot be larger "
		     "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return ENOSYS;
	}

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return ENOSPC;
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	LIST_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of log_dev_bshift */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;
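	/*
	 * Worked example (hypothetical value): with wl_log_dev_bshift == 9,
	 * wl_circ_off is 2<<9 == 1024 bytes, i.e. two 512 byte device blocks
	 * reserved ahead of the circular area for the commit headers.
	 */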

	/*
	 * wl_bufbytes_max limits the size of the in memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
	 *   it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
	 * Therefore it must be multiple of the least common multiple of those
	 * three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */
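	/*
	 * Worked example (hypothetical values): with PAGE_SHIFT == 12 and
	 * wl_log_dev_bshift == wl_fs_dev_bshift == 9, the least common
	 * multiple of the three alignments is 1<<12, so the shift pairs
	 * below simply round wl_bufbytes_max down to a multiple of
	 * PAGE_SIZE.
	 */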

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (nbuf / 2) * 1024;

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);

#if WAPBL_UVM_ALLOC
	wl->wl_deallocblks = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
	KASSERT(wl->wl_deallocblks != NULL);
	wl->wl_dealloclens = (void *) uvm_km_zalloc(kernel_map,
	    round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
	KASSERT(wl->wl_dealloclens != NULL);
#else
	wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
	    wl->wl_dealloclim);
	wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
	    wl->wl_dealloclim);
#endif

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1<<wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_malloc(len);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		int i;

		WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
		    ("wapbl_start: reusing log with %d inodes\n",
		    wr->wr_inodescnt));

		/*
		 * It's only valid to reuse the replay log if it's
		 * the same as the new log we just opened.
		 */
		KDASSERT(!wapbl_replay_isopen(wr));
		KASSERT(devvp->v_rdev == wr->wr_devvp->v_rdev);
		KASSERT(logpbn == wr->wr_logpbn);
		KASSERT(wl->wl_circ_size == wr->wr_wc_header.wc_circ_size);
		KASSERT(wl->wl_circ_off == wr->wr_wc_header.wc_circ_off);
		KASSERT(wl->wl_log_dev_bshift ==
		    wr->wr_wc_header.wc_log_dev_bshift);
		KASSERT(wl->wl_fs_dev_bshift ==
		    wr->wr_wc_header.wc_fs_dev_bshift);

		wl->wl_wc_header->wc_generation =
		    wr->wr_wc_header.wc_generation + 1;

		for (i = 0; i < wr->wr_inodescnt; i++)
			wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
			    wr->wr_inodes[i].wr_imode);

		/* Make sure new transaction won't overwrite old inodes list */
		KDASSERT(wapbl_transaction_len(wl) <=
		    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
		    wr->wr_inodestail));

		wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
		wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
		    wapbl_transaction_len(wl);

		error = wapbl_write_inodes(wl, &wl->wl_head);
		if (error)
			goto errout;

		KASSERT(wl->wl_head != wl->wl_tail);
		KASSERT(wl->wl_head != 0);
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) *
	    wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) *
	    wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);
	wapbl_free(wl);

	return error;
}

/*
 * Like wapbl_flush, only discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

#ifdef WAPBL_DEBUG_PRINT
	{
		struct wapbl_entry *we;
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}
	}

	/* Discard list of deallocs */
	wl->wl_dealloccnt = 0;
	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	struct vnode *vp;
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return EBUSY;
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	vp = wl->wl_logvp;

	wapbl_free(wl->wl_wc_scratch);
	wapbl_free(wl->wl_wc_header);
#if WAPBL_UVM_ALLOC
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
	    round_page(sizeof(*wl->wl_deallocblks) *
	    wl->wl_dealloclim));
	uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
	    round_page(sizeof(*wl->wl_dealloclens) *
	    wl->wl_dealloclim));
#else
	wapbl_free(wl->wl_deallocblks);
	wapbl_free(wl->wl_dealloclens);
#endif
	wapbl_inodetrk_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl);

	return 0;
}

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
	struct buf *bp;
	int error;

	KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
	KASSERT(devvp->v_type == VBLK);

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(&devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(&devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY; /* silly & dubious */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%x failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}

/*
 * Off is the byte offset into the log; returns the new offset for the
 * next write.  Handles log wraparound.
 */
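/*
 * Illustrative example (hypothetical numbers): with wl_circ_off == 1024
 * and wl_circ_size == 8192, a 4096 byte write starting at *offp == 7168
 * puts 2048 bytes at the end of the circular area, wraps, writes the
 * remaining 2048 bytes at offset 1024, and returns *offp == 3072.
 */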
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;

	KDASSERT(((len >> wl->wl_log_dev_bshift) <<
	    wl->wl_log_dev_bshift) == len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		error = wapbl_write(data, slen, wl->wl_devvp,
		    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	error = wapbl_write(data, len, wl->wl_devvp,
	    wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}

/****************************************************************/

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;
	krw_t op;

	KDASSERT(wl);

	/*
	 * XXX: The original code calls for the use of a RW_READER lock
	 * here, but it turns out there are performance issues with high
	 * metadata-rate workloads (e.g. multiple simultaneous tar
	 * extractions).  For now, we force the lock to be RW_WRITER,
	 * since that currently has the best performance characteristics
	 * (even for a single tar-file extraction).
	 *
	 */
#define WAPBL_DEBUG_SERIALIZE 1

#ifdef WAPBL_DEBUG_SERIALIZE
	op = RW_WRITER;
#else
	op = RW_READER;
#endif

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
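	/*
	 * Heuristic: assume each of the lockcount transactions already in
	 * progress may still add up to MAXPHYS more buffer bytes and
	 * roughly ten more buffers, and force a flush once half of either
	 * budget (or half the on-disk circular space) would be consumed.
	 */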
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
	    wl->wl_bufbytes_max / 2) ||
	    ((wl->wl_bufcount + (lockcount * 10)) >
	    wl->wl_bufcount_max / 2) ||
	    (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
	mutex_exit(&wl->wl_mtx);

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, op);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}

void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		LIST_REMOVE(bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked but dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	    ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	LIST_REMOVE(bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */

/****************************************************************/
/* Some utility inlines */

/* Advance the pointer at old to its new value at old+delta, wrapping
   within the circular log area. */
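/*
 * Illustrative example (hypothetical numbers): with size == 8192 and
 * off == 1024, wapbl_advance(8192, 1024, 8704, 1024) wraps past the end
 * of the circular area and returns 1536.
 */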
static __inline off_t
wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
{
	off_t new;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= size);
	KASSERT((old == 0) || (old >= off));
	KASSERT(old < (size + off));

	if ((old == 0) && (delta != 0))
		new = off + delta;
	else if ((old + delta) < (size + off))
		new = old + delta;
	else
		new = (old + delta) - size;

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (new == old));
	KASSERT((delta == 0) || (new != 0));
	KASSERT((delta != (size)) || (new == old));

	/* Define acceptable ranges for output. */
	KASSERT((new == 0) || (new >= off));
	KASSERT(new < (size + off));
	return new;
}

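/*
 * Number of bytes used in the circular area.  head == tail != 0 means
 * the log is full, so the modular arithmetic below is biased by one to
 * map that case to avail rather than to 0.
 */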
static __inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail == 0) {
		KASSERT(head == 0);
		return 0;
	}
	return ((head + (avail - 1) - tail) % avail) + 1;
}

static __inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{

	return avail - wapbl_space_used(avail, head, tail);
}

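/*
 * Advance the head by delta bytes.  If the log was empty, the tail is
 * pinned at the start of the circular area so the log becomes non-empty.
 */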
static __inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
    off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_free(size, head, tail));
	head = wapbl_advance(size, off, head, delta);
	if ((tail == 0) && (head != 0))
		tail = off;
	*headp = head;
	*tailp = tail;
}

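/*
 * Reclaim delta bytes by advancing the tail.  When the tail catches up
 * with the head the log becomes empty and both are reset to 0.
 */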
static __inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
    off_t *tailp)
{
	off_t head = *headp;
	off_t tail = *tailp;

	KASSERT(delta <= wapbl_space_used(size, head, tail));
	tail = wapbl_advance(size, off, tail, delta);
	if (head == tail) {
		head = tail = 0;
	}
	*headp = head;
	*tailp = tail;
}

#ifdef _KERNEL

/****************************************************************/

/*
 * Remove transactions whose buffers are completely flushed to disk.
 * Will block until at least minfree space is available.
 * Only intended to be called from inside wapbl_flush and therefore
 * does not protect against commit races with itself or with flush.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	minfree -= avail;
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved.  Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
	    wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	if (waitonly)
		return 0;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush.  This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}

/****************************************************************/

void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;

	/*
	 * Handle possible flushing of buffers after log has been
	 * decommissioned.
	 */
1166 1.1.4.1 haad if (!wl) {
1167 1.1.4.1 haad KASSERT(we->we_bufcount > 0);
1168 1.1.4.1 haad we->we_bufcount--;
1169 1.1.4.1 haad #ifdef WAPBL_DEBUG_BUFBYTES
1170 1.1.4.1 haad KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
1171 1.1.4.1 haad we->we_unsynced_bufbytes -= bp->b_bufsize;
1172 1.1.4.1 haad #endif
1173 1.1.4.1 haad
1174 1.1.4.1 haad if (we->we_bufcount == 0) {
1175 1.1.4.1 haad #ifdef WAPBL_DEBUG_BUFBYTES
1176 1.1.4.1 haad KASSERT(we->we_unsynced_bufbytes == 0);
1177 1.1.4.1 haad #endif
1178 1.1.4.1 haad wapbl_free(we);
1179 1.1.4.1 haad }
1180 1.1.4.1 haad
1181 1.1.4.1 haad brelse(bp, 0);
1182 1.1.4.1 haad return;
1183 1.1.4.1 haad }
1184 1.1.4.1 haad
1185 1.1.4.1 haad #ifdef ohbother
1186 1.1.4.1 haad KDASSERT(bp->b_flags & B_DONE);
1187 1.1.4.1 haad KDASSERT(!(bp->b_flags & B_DELWRI));
1188 1.1.4.1 haad KDASSERT(bp->b_flags & B_ASYNC);
1189 1.1.4.1 haad KDASSERT(bp->b_flags & B_BUSY);
1190 1.1.4.1 haad KDASSERT(!(bp->b_flags & B_LOCKED));
1191 1.1.4.1 haad KDASSERT(!(bp->b_flags & B_READ));
1192 1.1.4.1 haad KDASSERT(!(bp->b_flags & B_INVAL));
1193 1.1.4.1 haad KDASSERT(!(bp->b_flags & B_NOCACHE));
1194 1.1.4.1 haad #endif
1195 1.1.4.1 haad
1196 1.1.4.1 haad if (bp->b_error) {
1197 1.1.4.1 haad #ifdef notyet /* Can't currently handle possible dirty buffer reuse */
1198 1.1.4.1 haad XXXpooka: interfaces not fully updated
1199 1.1.4.1 haad Note: this was not enabled in the original patch
1200 1.1.4.1 haad against netbsd4 either. I don't know if comment
1201 1.1.4.1 haad above is true or not.
1202 1.1.4.1 haad
1203 1.1.4.1 haad /*
1204 1.1.4.1 haad * If an error occurs, report the error and leave the
1205 1.1.4.1 haad * buffer as a delayed write on the LRU queue.
1206 1.1.4.1 haad * restarting the write would likely result in
1207 1.1.4.1 haad * an error spinloop, so let it be done harmlessly
1208 1.1.4.1 haad * by the syncer.
1209 1.1.4.1 haad */
1210 1.1.4.1 haad bp->b_flags &= ~(B_DONE);
1211 1.1.4.1 haad simple_unlock(&bp->b_interlock);
1212 1.1.4.1 haad
1213 1.1.4.1 haad if (we->we_error == 0) {
1214 1.1.4.1 haad mutex_enter(&wl->wl_mtx);
1215 1.1.4.1 haad wl->wl_error_count++;
1216 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1217 1.1.4.1 haad cv_broadcast(&wl->wl_reclaimable_cv);
1218 1.1.4.1 haad }
1219 1.1.4.1 haad we->we_error = bp->b_error;
1220 1.1.4.1 haad bp->b_error = 0;
1221 1.1.4.1 haad brelse(bp);
1222 1.1.4.1 haad return;
1223 1.1.4.1 haad #else
1224 1.1.4.1 haad /* For now, just mark the log permanently errored out */
1225 1.1.4.1 haad
1226 1.1.4.1 haad mutex_enter(&wl->wl_mtx);
1227 1.1.4.1 haad if (wl->wl_error_count == 0) {
1228 1.1.4.1 haad wl->wl_error_count++;
1229 1.1.4.1 haad cv_broadcast(&wl->wl_reclaimable_cv);
1230 1.1.4.1 haad }
1231 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1232 1.1.4.1 haad #endif
1233 1.1.4.1 haad }
1234 1.1.4.1 haad
1235 1.1.4.1 haad mutex_enter(&wl->wl_mtx);
1236 1.1.4.1 haad
1237 1.1.4.1 haad KASSERT(we->we_bufcount > 0);
1238 1.1.4.1 haad we->we_bufcount--;
1239 1.1.4.1 haad #ifdef WAPBL_DEBUG_BUFBYTES
1240 1.1.4.1 haad KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
1241 1.1.4.1 haad we->we_unsynced_bufbytes -= bp->b_bufsize;
1242 1.1.4.1 haad KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
1243 1.1.4.1 haad wl->wl_unsynced_bufbytes -= bp->b_bufsize;
1244 1.1.4.1 haad #endif
1245 1.1.4.1 haad
1246 1.1.4.1 haad /*
1247 1.1.4.1 haad * If the current transaction can be reclaimed, start
1248 1.1.4.1 haad * at the beginning and reclaim any consecutive reclaimable
1249 1.1.4.1 haad * transactions. If we successfully reclaim anything,
1250 1.1.4.1 haad * then wakeup anyone waiting for the reclaim.
1251 1.1.4.1 haad */
1252 1.1.4.1 haad if (we->we_bufcount == 0) {
1253 1.1.4.1 haad size_t delta = 0;
1254 1.1.4.1 haad int errcnt = 0;
1255 1.1.4.1 haad #ifdef WAPBL_DEBUG_BUFBYTES
1256 1.1.4.1 haad KDASSERT(we->we_unsynced_bufbytes == 0);
1257 1.1.4.1 haad #endif
1258 1.1.4.1 haad /*
1259 1.1.4.1 haad * clear any posted error, since the buffer it came from
1260 1.1.4.1 haad * has successfully flushed by now
1261 1.1.4.1 haad */
1262 1.1.4.1 haad while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
1263 1.1.4.1 haad (we->we_bufcount == 0)) {
1264 1.1.4.1 haad delta += we->we_reclaimable_bytes;
1265 1.1.4.1 haad if (we->we_error)
1266 1.1.4.1 haad errcnt++;
1267 1.1.4.1 haad SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
1268 1.1.4.1 haad wapbl_free(we);
1269 1.1.4.1 haad }
1270 1.1.4.1 haad
1271 1.1.4.1 haad if (delta) {
1272 1.1.4.1 haad wl->wl_reclaimable_bytes += delta;
1273 1.1.4.1 haad KASSERT(wl->wl_error_count >= errcnt);
1274 1.1.4.1 haad wl->wl_error_count -= errcnt;
1275 1.1.4.1 haad cv_broadcast(&wl->wl_reclaimable_cv);
1276 1.1.4.1 haad }
1277 1.1.4.1 haad }
1278 1.1.4.1 haad
1279 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1280 1.1.4.1 haad brelse(bp, 0);
1281 1.1.4.1 haad }
1282 1.1.4.1 haad
1283 1.1.4.1 haad /*
1284 1.1.4.1 haad * Write transactions to disk + start I/O for contents
1285 1.1.4.1 haad */
1286 1.1.4.1 haad int
1287 1.1.4.1 haad wapbl_flush(struct wapbl *wl, int waitfor)
1288 1.1.4.1 haad {
1289 1.1.4.1 haad struct buf *bp;
1290 1.1.4.1 haad struct wapbl_entry *we;
1291 1.1.4.1 haad off_t off;
1292 1.1.4.1 haad off_t head;
1293 1.1.4.1 haad off_t tail;
1294 1.1.4.1 haad size_t delta = 0;
1295 1.1.4.1 haad size_t flushsize;
1296 1.1.4.1 haad size_t reserved;
1297 1.1.4.1 haad int error = 0;
1298 1.1.4.1 haad
1299 1.1.4.1 haad /*
1300 1.1.4.1 haad * Do a quick check to see if a full flush can be skipped
1301 1.1.4.1 haad * This assumes that the flush callback does not need to be called
1302 1.1.4.1 haad * unless there are other outstanding bufs.
1303 1.1.4.1 haad */
1304 1.1.4.1 haad if (!waitfor) {
1305 1.1.4.1 haad size_t nbufs;
1306 1.1.4.1 haad mutex_enter(&wl->wl_mtx); /* XXX need mutex here to
1307 1.1.4.1 haad protect the KASSERTS */
1308 1.1.4.1 haad nbufs = wl->wl_bufcount;
1309 1.1.4.1 haad KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1310 1.1.4.1 haad KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1311 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1312 1.1.4.1 haad if (nbufs == 0)
1313 1.1.4.1 haad return 0;
1314 1.1.4.1 haad }
1315 1.1.4.1 haad
1316 1.1.4.1 haad /*
1317 1.1.4.1 haad * XXX we may consider using LK_UPGRADE here
1318 1.1.4.1 haad * if we want to call flush from inside a transaction
1319 1.1.4.1 haad */
1320 1.1.4.1 haad rw_enter(&wl->wl_rwlock, RW_WRITER);
1321 1.1.4.1 haad wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
1322 1.1.4.1 haad wl->wl_dealloccnt);
1323 1.1.4.1 haad
1324 1.1.4.1 haad /*
1325 1.1.4.1 haad * Now that we are fully locked and flushed,
1326 1.1.4.1 haad * do another check for nothing to do.
1327 1.1.4.1 haad */
1328 1.1.4.1 haad if (wl->wl_bufcount == 0) {
1329 1.1.4.1 haad goto out;
1330 1.1.4.1 haad }
1331 1.1.4.1 haad
1332 1.1.4.1 haad #if 0
1333 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1334 1.1.4.1 haad ("wapbl_flush thread %d.%d flushing entries with "
1335 1.1.4.1 haad "bufcount=%zu bufbytes=%zu\n",
1336 1.1.4.1 haad curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1337 1.1.4.1 haad wl->wl_bufbytes));
1338 1.1.4.1 haad #endif
1339 1.1.4.1 haad
1340 1.1.4.1 haad /* Calculate amount of space needed to flush */
1341 1.1.4.1 haad flushsize = wapbl_transaction_len(wl);
1342 1.1.4.1 haad
1343 1.1.4.1 haad if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1344 1.1.4.1 haad /*
1345 1.1.4.1 haad * XXX this could be handled more gracefully, perhaps place
1346 1.1.4.1 haad * only a partial transaction in the log and allow the
1347 1.1.4.1 haad 		 * remainder to flush without the protection of the journal.
1348 1.1.4.1 haad */
1349 1.1.4.1 haad panic("wapbl_flush: current transaction too big to flush\n");
1350 1.1.4.1 haad }
1351 1.1.4.1 haad
1352 1.1.4.1 haad error = wapbl_truncate(wl, flushsize, 0);
1353 1.1.4.1 haad if (error)
1354 1.1.4.1 haad goto out2;
1355 1.1.4.1 haad
1356 1.1.4.1 haad off = wl->wl_head;
1357 1.1.4.1 haad KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
1358 1.1.4.1 haad (off < wl->wl_circ_off + wl->wl_circ_size)));
1359 1.1.4.1 haad error = wapbl_write_blocks(wl, &off);
1360 1.1.4.1 haad if (error)
1361 1.1.4.1 haad goto out2;
1362 1.1.4.1 haad error = wapbl_write_revocations(wl, &off);
1363 1.1.4.1 haad if (error)
1364 1.1.4.1 haad goto out2;
1365 1.1.4.1 haad error = wapbl_write_inodes(wl, &off);
1366 1.1.4.1 haad if (error)
1367 1.1.4.1 haad goto out2;
1368 1.1.4.1 haad
1369 1.1.4.1 haad reserved = 0;
1370 1.1.4.1 haad if (wl->wl_inohashcnt)
1371 1.1.4.1 haad reserved = wapbl_transaction_inodes_len(wl);
1372 1.1.4.1 haad
1373 1.1.4.1 haad head = wl->wl_head;
1374 1.1.4.1 haad tail = wl->wl_tail;
1375 1.1.4.1 haad
1376 1.1.4.1 haad wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1377 1.1.4.1 haad &head, &tail);
1378 1.1.4.1 haad #ifdef WAPBL_DEBUG
1379 1.1.4.1 haad if (head != off) {
1380 1.1.4.1 haad panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1381 1.1.4.1 haad " off=%"PRIdMAX" flush=%zu\n",
1382 1.1.4.1 haad (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1383 1.1.4.1 haad flushsize);
1384 1.1.4.1 haad }
1385 1.1.4.1 haad #else
1386 1.1.4.1 haad KASSERT(head == off);
1387 1.1.4.1 haad #endif
1388 1.1.4.1 haad
1389 1.1.4.1 haad /* Opportunistically move the tail forward if we can */
1390 1.1.4.1 haad if (!wapbl_lazy_truncate) {
1391 1.1.4.1 haad mutex_enter(&wl->wl_mtx);
1392 1.1.4.1 haad delta = wl->wl_reclaimable_bytes;
1393 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1394 1.1.4.1 haad wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1395 1.1.4.1 haad &head, &tail);
1396 1.1.4.1 haad }
1397 1.1.4.1 haad
1398 1.1.4.1 haad error = wapbl_write_commit(wl, head, tail);
1399 1.1.4.1 haad if (error)
1400 1.1.4.1 haad goto out2;
1401 1.1.4.1 haad
1402 1.1.4.1 haad 	/* XXX convert this allocation to a pool or kmem(9)? */
1403 1.1.4.1 haad we = wapbl_calloc(1, sizeof(*we));
1404 1.1.4.1 haad
1405 1.1.4.1 haad #ifdef WAPBL_DEBUG_BUFBYTES
1406 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1407 1.1.4.1 haad ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1408 1.1.4.1 haad " unsynced=%zu"
1409 1.1.4.1 haad "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1410 1.1.4.1 haad "inodes=%d\n",
1411 1.1.4.1 haad curproc->p_pid, curlwp->l_lid, flushsize, delta,
1412 1.1.4.1 haad wapbl_space_used(wl->wl_circ_size, head, tail),
1413 1.1.4.1 haad wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1414 1.1.4.1 haad wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1415 1.1.4.1 haad wl->wl_inohashcnt));
1416 1.1.4.1 haad #else
1417 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1418 1.1.4.1 haad ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1419 1.1.4.1 haad "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1420 1.1.4.1 haad "inodes=%d\n",
1421 1.1.4.1 haad curproc->p_pid, curlwp->l_lid, flushsize, delta,
1422 1.1.4.1 haad wapbl_space_used(wl->wl_circ_size, head, tail),
1423 1.1.4.1 haad wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1424 1.1.4.1 haad wl->wl_dealloccnt, wl->wl_inohashcnt));
1425 1.1.4.1 haad #endif
1426 1.1.4.1 haad
1427 1.1.4.1 haad
1428 1.1.4.1 haad mutex_enter(&bufcache_lock);
1429 1.1.4.1 haad mutex_enter(&wl->wl_mtx);
1430 1.1.4.1 haad
1431 1.1.4.1 haad wl->wl_reserved_bytes = reserved;
1432 1.1.4.1 haad wl->wl_head = head;
1433 1.1.4.1 haad wl->wl_tail = tail;
1434 1.1.4.1 haad KASSERT(wl->wl_reclaimable_bytes >= delta);
1435 1.1.4.1 haad wl->wl_reclaimable_bytes -= delta;
1436 1.1.4.1 haad wl->wl_dealloccnt = 0;
1437 1.1.4.1 haad #ifdef WAPBL_DEBUG_BUFBYTES
1438 1.1.4.1 haad wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1439 1.1.4.1 haad #endif
1440 1.1.4.1 haad
1441 1.1.4.1 haad we->we_wapbl = wl;
1442 1.1.4.1 haad we->we_bufcount = wl->wl_bufcount;
1443 1.1.4.1 haad #ifdef WAPBL_DEBUG_BUFBYTES
1444 1.1.4.1 haad we->we_unsynced_bufbytes = wl->wl_bufbytes;
1445 1.1.4.1 haad #endif
1446 1.1.4.1 haad we->we_reclaimable_bytes = flushsize;
1447 1.1.4.1 haad we->we_error = 0;
1448 1.1.4.1 haad SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1449 1.1.4.1 haad
1450 1.1.4.1 haad /*
1451 1.1.4.1 haad 	 * This flushes bufs in the reverse of the order in which they were
1452 1.1.4.1 haad 	 * queued.  That shouldn't matter, but if we care we could use a TAILQ
1453 1.1.4.1 haad 	 * instead.  XXX Note they will get put on the lru queue when they
1454 1.1.4.1 haad 	 * flush, so we might actually want to change this to preserve order.
1455 1.1.4.1 haad */
1456 1.1.4.1 haad while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
1457 1.1.4.1 haad if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1458 1.1.4.1 haad continue;
1459 1.1.4.1 haad }
1460 1.1.4.1 haad bp->b_iodone = wapbl_biodone;
1461 1.1.4.1 haad bp->b_private = we;
1462 1.1.4.1 haad bremfree(bp);
1463 1.1.4.1 haad wapbl_remove_buf_locked(wl, bp);
1464 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1465 1.1.4.1 haad mutex_exit(&bufcache_lock);
1466 1.1.4.1 haad bawrite(bp);
1467 1.1.4.1 haad mutex_enter(&bufcache_lock);
1468 1.1.4.1 haad mutex_enter(&wl->wl_mtx);
1469 1.1.4.1 haad }
1470 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1471 1.1.4.1 haad mutex_exit(&bufcache_lock);
1472 1.1.4.1 haad
1473 1.1.4.1 haad #if 0
1474 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1475 1.1.4.1 haad ("wapbl_flush thread %d.%d done flushing entries...\n",
1476 1.1.4.1 haad curproc->p_pid, curlwp->l_lid));
1477 1.1.4.1 haad #endif
1478 1.1.4.1 haad
1479 1.1.4.1 haad out:
1480 1.1.4.1 haad
1481 1.1.4.1 haad /*
1482 1.1.4.1 haad * If the waitfor flag is set, don't return until everything is
1483 1.1.4.1 haad 	 * fully flushed and the on-disk log is empty.
1484 1.1.4.1 haad */
1485 1.1.4.1 haad if (waitfor) {
1486 1.1.4.1 haad error = wapbl_truncate(wl, wl->wl_circ_size -
1487 1.1.4.1 haad wl->wl_reserved_bytes, wapbl_lazy_truncate);
1488 1.1.4.1 haad }
1489 1.1.4.1 haad
1490 1.1.4.1 haad out2:
1491 1.1.4.1 haad if (error) {
1492 1.1.4.1 haad wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
1493 1.1.4.1 haad wl->wl_dealloclens, wl->wl_dealloccnt);
1494 1.1.4.1 haad }
1495 1.1.4.1 haad
1496 1.1.4.1 haad #ifdef WAPBL_DEBUG_PRINT
1497 1.1.4.1 haad if (error) {
1498 1.1.4.1 haad pid_t pid = -1;
1499 1.1.4.1 haad lwpid_t lid = -1;
1500 1.1.4.1 haad if (curproc)
1501 1.1.4.1 haad pid = curproc->p_pid;
1502 1.1.4.1 haad if (curlwp)
1503 1.1.4.1 haad lid = curlwp->l_lid;
1504 1.1.4.1 haad mutex_enter(&wl->wl_mtx);
1505 1.1.4.1 haad #ifdef WAPBL_DEBUG_BUFBYTES
1506 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1507 1.1.4.1 haad ("wapbl_flush: thread %d.%d aborted flush: "
1508 1.1.4.1 haad "error = %d\n"
1509 1.1.4.1 haad "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1510 1.1.4.1 haad "deallocs=%d inodes=%d\n"
1511 1.1.4.1 haad "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1512 1.1.4.1 haad "unsynced=%zu\n",
1513 1.1.4.1 haad pid, lid, error, wl->wl_bufcount,
1514 1.1.4.1 haad wl->wl_bufbytes, wl->wl_bcount,
1515 1.1.4.1 haad wl->wl_dealloccnt, wl->wl_inohashcnt,
1516 1.1.4.1 haad wl->wl_error_count, wl->wl_reclaimable_bytes,
1517 1.1.4.1 haad wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1518 1.1.4.1 haad SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1519 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1520 1.1.4.1 haad ("\tentry: bufcount = %zu, reclaimable = %zu, "
1521 1.1.4.1 haad "error = %d, unsynced = %zu\n",
1522 1.1.4.1 haad we->we_bufcount, we->we_reclaimable_bytes,
1523 1.1.4.1 haad we->we_error, we->we_unsynced_bufbytes));
1524 1.1.4.1 haad }
1525 1.1.4.1 haad #else
1526 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1527 1.1.4.1 haad ("wapbl_flush: thread %d.%d aborted flush: "
1528 1.1.4.1 haad "error = %d\n"
1529 1.1.4.1 haad "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1530 1.1.4.1 haad "deallocs=%d inodes=%d\n"
1531 1.1.4.1 haad "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
1532 1.1.4.1 haad pid, lid, error, wl->wl_bufcount,
1533 1.1.4.1 haad wl->wl_bufbytes, wl->wl_bcount,
1534 1.1.4.1 haad wl->wl_dealloccnt, wl->wl_inohashcnt,
1535 1.1.4.1 haad wl->wl_error_count, wl->wl_reclaimable_bytes,
1536 1.1.4.1 haad wl->wl_reserved_bytes));
1537 1.1.4.1 haad SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1538 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1539 1.1.4.1 haad ("\tentry: bufcount = %zu, reclaimable = %zu, "
1540 1.1.4.1 haad "error = %d\n", we->we_bufcount,
1541 1.1.4.1 haad we->we_reclaimable_bytes, we->we_error));
1542 1.1.4.1 haad }
1543 1.1.4.1 haad #endif
1544 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1545 1.1.4.1 haad }
1546 1.1.4.1 haad #endif
1547 1.1.4.1 haad
1548 1.1.4.1 haad rw_exit(&wl->wl_rwlock);
1549 1.1.4.1 haad return error;
1550 1.1.4.1 haad }
1551 1.1.4.1 haad
1552 1.1.4.1 haad /****************************************************************/
1553 1.1.4.1 haad
1554 1.1.4.1 haad void
1555 1.1.4.1 haad wapbl_jlock_assert(struct wapbl *wl)
1556 1.1.4.1 haad {
1557 1.1.4.1 haad
1558 1.1.4.1 haad #ifdef WAPBL_DEBUG_SERIALIZE
1559 1.1.4.1 haad KASSERT(rw_write_held(&wl->wl_rwlock));
1560 1.1.4.1 haad #else
1561 1.1.4.1 haad KASSERT(rw_read_held(&wl->wl_rwlock) || rw_write_held(&wl->wl_rwlock));
1562 1.1.4.1 haad #endif
1563 1.1.4.1 haad }
1564 1.1.4.1 haad
1565 1.1.4.1 haad void
1566 1.1.4.1 haad wapbl_junlock_assert(struct wapbl *wl)
1567 1.1.4.1 haad {
1568 1.1.4.1 haad
1569 1.1.4.1 haad #ifdef WAPBL_DEBUG_SERIALIZE
1570 1.1.4.1 haad KASSERT(!rw_write_held(&wl->wl_rwlock));
1571 1.1.4.1 haad #endif
1572 1.1.4.1 haad }
1573 1.1.4.1 haad
1574 1.1.4.1 haad /****************************************************************/
1575 1.1.4.1 haad
1576 1.1.4.1 haad /* locks missing */
1577 1.1.4.1 haad void
1578 1.1.4.1 haad wapbl_print(struct wapbl *wl,
1579 1.1.4.1 haad int full,
1580 1.1.4.1 haad void (*pr)(const char *, ...))
1581 1.1.4.1 haad {
1582 1.1.4.1 haad struct buf *bp;
1583 1.1.4.1 haad struct wapbl_entry *we;
1584 1.1.4.1 haad (*pr)("wapbl %p", wl);
1585 1.1.4.1 haad (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
1586 1.1.4.1 haad wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
1587 1.1.4.1 haad (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
1588 1.1.4.1 haad wl->wl_circ_size, wl->wl_circ_off,
1589 1.1.4.1 haad (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
1590 1.1.4.1 haad (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
1591 1.1.4.1 haad 	    wl->wl_fs_dev_bshift, wl->wl_log_dev_bshift);
1592 1.1.4.1 haad #ifdef WAPBL_DEBUG_BUFBYTES
1593 1.1.4.1 haad (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1594 1.1.4.1 haad "reserved = %zu errcnt = %d unsynced = %zu\n",
1595 1.1.4.1 haad wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1596 1.1.4.1 haad wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1597 1.1.4.1 haad wl->wl_error_count, wl->wl_unsynced_bufbytes);
1598 1.1.4.1 haad #else
1599 1.1.4.1 haad (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1600 1.1.4.1 haad "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
1601 1.1.4.1 haad wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1602 1.1.4.1 haad wl->wl_error_count);
1603 1.1.4.1 haad #endif
1604 1.1.4.1 haad (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
1605 1.1.4.1 haad wl->wl_dealloccnt, wl->wl_dealloclim);
1606 1.1.4.1 haad (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
1607 1.1.4.1 haad wl->wl_inohashcnt, wl->wl_inohashmask);
1608 1.1.4.1 haad (*pr)("entries:\n");
1609 1.1.4.1 haad SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1610 1.1.4.1 haad #ifdef WAPBL_DEBUG_BUFBYTES
1611 1.1.4.1 haad (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
1612 1.1.4.1 haad "unsynced = %zu\n",
1613 1.1.4.1 haad we->we_bufcount, we->we_reclaimable_bytes,
1614 1.1.4.1 haad we->we_error, we->we_unsynced_bufbytes);
1615 1.1.4.1 haad #else
1616 1.1.4.1 haad (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
1617 1.1.4.1 haad we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
1618 1.1.4.1 haad #endif
1619 1.1.4.1 haad }
1620 1.1.4.1 haad if (full) {
1621 1.1.4.1 haad int cnt = 0;
1622 1.1.4.1 haad (*pr)("bufs =");
1623 1.1.4.1 haad LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
1624 1.1.4.1 haad if (!LIST_NEXT(bp, b_wapbllist)) {
1625 1.1.4.1 haad (*pr)(" %p", bp);
1626 1.1.4.1 haad } else if ((++cnt % 6) == 0) {
1627 1.1.4.1 haad (*pr)(" %p,\n\t", bp);
1628 1.1.4.1 haad } else {
1629 1.1.4.1 haad (*pr)(" %p,", bp);
1630 1.1.4.1 haad }
1631 1.1.4.1 haad }
1632 1.1.4.1 haad (*pr)("\n");
1633 1.1.4.1 haad
1634 1.1.4.1 haad (*pr)("dealloced blks = ");
1635 1.1.4.1 haad {
1636 1.1.4.1 haad int i;
1637 1.1.4.1 haad cnt = 0;
1638 1.1.4.1 haad for (i = 0; i < wl->wl_dealloccnt; i++) {
1639 1.1.4.1 haad (*pr)(" %"PRId64":%d,",
1640 1.1.4.1 haad wl->wl_deallocblks[i],
1641 1.1.4.1 haad wl->wl_dealloclens[i]);
1642 1.1.4.1 haad if ((++cnt % 4) == 0) {
1643 1.1.4.1 haad (*pr)("\n\t");
1644 1.1.4.1 haad }
1645 1.1.4.1 haad }
1646 1.1.4.1 haad }
1647 1.1.4.1 haad (*pr)("\n");
1648 1.1.4.1 haad
1649 1.1.4.1 haad (*pr)("registered inodes = ");
1650 1.1.4.1 haad {
1651 1.1.4.1 haad int i;
1652 1.1.4.1 haad cnt = 0;
1653 1.1.4.1 haad for (i = 0; i <= wl->wl_inohashmask; i++) {
1654 1.1.4.1 haad struct wapbl_ino_head *wih;
1655 1.1.4.1 haad struct wapbl_ino *wi;
1656 1.1.4.1 haad
1657 1.1.4.1 haad wih = &wl->wl_inohash[i];
1658 1.1.4.1 haad LIST_FOREACH(wi, wih, wi_hash) {
1659 1.1.4.1 haad if (wi->wi_ino == 0)
1660 1.1.4.1 haad continue;
1661 1.1.4.1 haad (*pr)(" %"PRId32"/0%06"PRIo32",",
1662 1.1.4.1 haad wi->wi_ino, wi->wi_mode);
1663 1.1.4.1 haad if ((++cnt % 4) == 0) {
1664 1.1.4.1 haad (*pr)("\n\t");
1665 1.1.4.1 haad }
1666 1.1.4.1 haad }
1667 1.1.4.1 haad }
1668 1.1.4.1 haad (*pr)("\n");
1669 1.1.4.1 haad }
1670 1.1.4.1 haad }
1671 1.1.4.1 haad }
1672 1.1.4.1 haad
1673 1.1.4.1 haad #if defined(WAPBL_DEBUG) || defined(DDB)
1674 1.1.4.1 haad void
1675 1.1.4.1 haad wapbl_dump(struct wapbl *wl)
1676 1.1.4.1 haad {
1677 1.1.4.1 haad #if defined(WAPBL_DEBUG)
1678 1.1.4.1 haad if (!wl)
1679 1.1.4.1 haad wl = wapbl_debug_wl;
1680 1.1.4.1 haad #endif
1681 1.1.4.1 haad if (!wl)
1682 1.1.4.1 haad return;
1683 1.1.4.1 haad wapbl_print(wl, 1, printf);
1684 1.1.4.1 haad }
1685 1.1.4.1 haad #endif
1686 1.1.4.1 haad
1687 1.1.4.1 haad /****************************************************************/
1688 1.1.4.1 haad
1689 1.1.4.1 haad void
1690 1.1.4.1 haad wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
1691 1.1.4.1 haad {
1692 1.1.4.1 haad
1693 1.1.4.1 haad wapbl_jlock_assert(wl);
1694 1.1.4.1 haad
1695 1.1.4.1 haad /* XXX should eventually instead tie this into resource estimation */
1696 1.1.4.1 haad /* XXX this KASSERT needs locking/mutex analysis */
1697 1.1.4.1 haad KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
1698 1.1.4.1 haad wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
1699 1.1.4.1 haad wl->wl_dealloclens[wl->wl_dealloccnt] = len;
1700 1.1.4.1 haad wl->wl_dealloccnt++;
1701 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
1702 1.1.4.1 haad ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
1703 1.1.4.1 haad }
1704 1.1.4.1 haad
1705 1.1.4.1 haad /****************************************************************/
1706 1.1.4.1 haad
1707 1.1.4.1 haad static void
1708 1.1.4.1 haad wapbl_inodetrk_init(struct wapbl *wl, u_int size)
1709 1.1.4.1 haad {
1710 1.1.4.1 haad
1711 1.1.4.1 haad wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
1712 1.1.4.1 haad if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
1713 1.1.4.1 haad pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
1714 1.1.4.1 haad "wapblinopl", &pool_allocator_nointr, IPL_NONE);
1715 1.1.4.1 haad }
1716 1.1.4.1 haad }
1717 1.1.4.1 haad
1718 1.1.4.1 haad static void
1719 1.1.4.1 haad wapbl_inodetrk_free(struct wapbl *wl)
1720 1.1.4.1 haad {
1721 1.1.4.1 haad
1722 1.1.4.1 haad /* XXX this KASSERT needs locking/mutex analysis */
1723 1.1.4.1 haad KASSERT(wl->wl_inohashcnt == 0);
1724 1.1.4.1 haad hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
1725 1.1.4.1 haad if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
1726 1.1.4.1 haad pool_destroy(&wapbl_ino_pool);
1727 1.1.4.1 haad }
1728 1.1.4.1 haad }
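
/*
 * Illustrative sketch, not compiled: wapbl_ino_pool above is shared by
 * every wapbl instance, so it is constructed on the first reference and
 * destroyed on the last, with atomic_inc_uint_nv()/atomic_dec_uint_nv()
 * deciding who does the work.  The same first-in/last-out pattern in
 * portable C11 atomics; resource_create()/resource_destroy() are
 * stand-ins for pool_init()/pool_destroy(), not kernel interfaces.
 */
#if 0
#include <stdatomic.h>

static atomic_uint refcount;

static void
resource_create(void)
{
	/* stands in for pool_init() */
}

static void
resource_destroy(void)
{
	/* stands in for pool_destroy() */
}

static void
shared_ref(void)
{
	/* atomic_fetch_add returns the old value; 0 means we are first */
	if (atomic_fetch_add(&refcount, 1) == 0)
		resource_create();
}

static void
shared_unref(void)
{
	/* an old value of 1 means we were the last user */
	if (atomic_fetch_sub(&refcount, 1) == 1)
		resource_destroy();
}
#endif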
1729 1.1.4.1 haad
1730 1.1.4.1 haad static struct wapbl_ino *
1731 1.1.4.1 haad wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
1732 1.1.4.1 haad {
1733 1.1.4.1 haad struct wapbl_ino_head *wih;
1734 1.1.4.1 haad struct wapbl_ino *wi;
1735 1.1.4.1 haad
1736 1.1.4.1 haad KASSERT(mutex_owned(&wl->wl_mtx));
1737 1.1.4.1 haad
1738 1.1.4.1 haad wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1739 1.1.4.1 haad LIST_FOREACH(wi, wih, wi_hash) {
1740 1.1.4.1 haad if (ino == wi->wi_ino)
1741 1.1.4.1 haad return wi;
1742 1.1.4.1 haad }
1743 1.1.4.1 haad return 0;
1744 1.1.4.1 haad }
1745 1.1.4.1 haad
1746 1.1.4.1 haad void
1747 1.1.4.1 haad wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
1748 1.1.4.1 haad {
1749 1.1.4.1 haad struct wapbl_ino_head *wih;
1750 1.1.4.1 haad struct wapbl_ino *wi;
1751 1.1.4.1 haad
1752 1.1.4.1 haad wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
1753 1.1.4.1 haad
1754 1.1.4.1 haad mutex_enter(&wl->wl_mtx);
1755 1.1.4.1 haad if (wapbl_inodetrk_get(wl, ino) == NULL) {
1756 1.1.4.1 haad wi->wi_ino = ino;
1757 1.1.4.1 haad wi->wi_mode = mode;
1758 1.1.4.1 haad wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1759 1.1.4.1 haad LIST_INSERT_HEAD(wih, wi, wi_hash);
1760 1.1.4.1 haad wl->wl_inohashcnt++;
1761 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_INODE,
1762 1.1.4.1 haad ("wapbl_register_inode: ino=%"PRId64"\n", ino));
1763 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1764 1.1.4.1 haad } else {
1765 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1766 1.1.4.1 haad pool_put(&wapbl_ino_pool, wi);
1767 1.1.4.1 haad }
1768 1.1.4.1 haad }
1769 1.1.4.1 haad
1770 1.1.4.1 haad void
1771 1.1.4.1 haad wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
1772 1.1.4.1 haad {
1773 1.1.4.1 haad struct wapbl_ino *wi;
1774 1.1.4.1 haad
1775 1.1.4.1 haad mutex_enter(&wl->wl_mtx);
1776 1.1.4.1 haad wi = wapbl_inodetrk_get(wl, ino);
1777 1.1.4.1 haad if (wi) {
1778 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_INODE,
1779 1.1.4.1 haad ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
1780 1.1.4.1 haad KASSERT(wl->wl_inohashcnt > 0);
1781 1.1.4.1 haad wl->wl_inohashcnt--;
1782 1.1.4.1 haad LIST_REMOVE(wi, wi_hash);
1783 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1784 1.1.4.1 haad
1785 1.1.4.1 haad pool_put(&wapbl_ino_pool, wi);
1786 1.1.4.1 haad } else {
1787 1.1.4.1 haad mutex_exit(&wl->wl_mtx);
1788 1.1.4.1 haad }
1789 1.1.4.1 haad }
1790 1.1.4.1 haad
1791 1.1.4.1 haad /****************************************************************/
1792 1.1.4.1 haad
1793 1.1.4.1 haad static __inline size_t
1794 1.1.4.1 haad wapbl_transaction_inodes_len(struct wapbl *wl)
1795 1.1.4.1 haad {
1796 1.1.4.1 haad int blocklen = 1<<wl->wl_log_dev_bshift;
1797 1.1.4.1 haad int iph;
1798 1.1.4.1 haad
1799 1.1.4.1 haad 	/* Calculate the number of inodes described in an inodelist header */
1800 1.1.4.1 haad iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
1801 1.1.4.1 haad sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
1802 1.1.4.1 haad
1803 1.1.4.1 haad KASSERT(iph > 0);
1804 1.1.4.1 haad
1805 1.1.4.1 haad return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
1806 1.1.4.1 haad }
1807 1.1.4.1 haad
1808 1.1.4.1 haad
1809 1.1.4.1 haad /* Calculate amount of space a transaction will take on disk */
1810 1.1.4.1 haad static size_t
1811 1.1.4.1 haad wapbl_transaction_len(struct wapbl *wl)
1812 1.1.4.1 haad {
1813 1.1.4.1 haad int blocklen = 1<<wl->wl_log_dev_bshift;
1814 1.1.4.1 haad size_t len;
1815 1.1.4.1 haad int bph;
1816 1.1.4.1 haad
1817 1.1.4.1 haad /* Calculate number of blocks described in a blocklist header */
1818 1.1.4.1 haad bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1819 1.1.4.1 haad sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1820 1.1.4.1 haad
1821 1.1.4.1 haad KASSERT(bph > 0);
1822 1.1.4.1 haad
1823 1.1.4.1 haad len = wl->wl_bcount;
1824 1.1.4.1 haad len += howmany(wl->wl_bufcount, bph)*blocklen;
1825 1.1.4.1 haad len += howmany(wl->wl_dealloccnt, bph)*blocklen;
1826 1.1.4.1 haad len += wapbl_transaction_inodes_len(wl);
1827 1.1.4.1 haad
1828 1.1.4.1 haad return len;
1829 1.1.4.1 haad }
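
/*
 * Illustrative sketch, not compiled: the length estimate above is plain
 * header-count arithmetic -- a blocklist header block of "blocklen"
 * bytes describes at most bph buffers, so nbufs buffers cost
 * howmany(nbufs, bph) header blocks plus the buffer data itself, and
 * the deallocation and inode records are costed the same way.  The
 * header offset and per-entry size are parameters here rather than
 * assumed constants, since they depend on the on-disk record layout.
 */
#if 0
#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

static size_t
estimate_len(size_t blocklen, size_t hdroff, size_t entsize,
    size_t nbufs, size_t bcount, size_t ndeallocs, size_t inolen)
{
	size_t bph = (blocklen - hdroff) / entsize;	/* entries per header */
	size_t len = bcount;				/* raw buffer data */

	len += howmany(nbufs, bph) * blocklen;		/* blocklist headers */
	len += howmany(ndeallocs, bph) * blocklen;	/* revocation headers */
	len += inolen;					/* inode list blocks */
	return len;
}

int
main(void)
{
	/* 2 KiB log blocks; the 32-byte header and 16-byte entries are made up */
	printf("%zu\n", estimate_len(2048, 32, 16, 100, 100 * 8192, 4, 2048));
	return 0;
}
#endif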
1830 1.1.4.1 haad
1831 1.1.4.1 haad /*
1832 1.1.4.1 haad * Perform commit operation
1833 1.1.4.1 haad *
1834 1.1.4.1 haad  * Note that incrementing the generation number must be
1835 1.1.4.1 haad  * protected against racing with other invocations of
1836 1.1.4.1 haad  * wapbl_write_commit.  This is ok since this routine is
1837 1.1.4.1 haad  * only invoked from wapbl_flush.
1838 1.1.4.1 haad */
1839 1.1.4.1 haad static int
1840 1.1.4.1 haad wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
1841 1.1.4.1 haad {
1842 1.1.4.1 haad struct wapbl_wc_header *wc = wl->wl_wc_header;
1843 1.1.4.1 haad struct timespec ts;
1844 1.1.4.1 haad int error;
1845 1.1.4.1 haad int force = 1;
1846 1.1.4.1 haad
1847 1.1.4.1 haad 	/* XXX Calculate a checksum here; for now we just sync the device cache */
1848 1.1.4.1 haad error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
1849 1.1.4.1 haad if (error) {
1850 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1851 1.1.4.1 haad ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
1852 1.1.4.1 haad "returned %d\n", wl->wl_devvp->v_rdev, error));
1853 1.1.4.1 haad }
1854 1.1.4.1 haad
1855 1.1.4.1 haad wc->wc_head = head;
1856 1.1.4.1 haad wc->wc_tail = tail;
1857 1.1.4.1 haad wc->wc_checksum = 0;
1858 1.1.4.1 haad wc->wc_version = 1;
1859 1.1.4.1 haad getnanotime(&ts);
1860 1.1.4.1 haad 	wc->wc_time = ts.tv_sec;
1861 1.1.4.1 haad wc->wc_timensec = ts.tv_nsec;
1862 1.1.4.1 haad
1863 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1864 1.1.4.1 haad 	    ("wapbl_write_commit: head = %"PRIdMAX" tail = %"PRIdMAX"\n",
1865 1.1.4.1 haad (intmax_t)head, (intmax_t)tail));
1866 1.1.4.1 haad
1867 1.1.4.1 haad /*
1868 1.1.4.1 haad 	 * XXX if the generation number is about to roll over, first zero
1869 1.1.4.1 haad 	 * out the second commit header before trying to write both headers.
1870 1.1.4.1 haad */
1871 1.1.4.1 haad
1872 1.1.4.1 haad error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
1873 1.1.4.1 haad wl->wl_logpbn + wc->wc_generation % 2);
1874 1.1.4.1 haad if (error)
1875 1.1.4.1 haad return error;
1876 1.1.4.1 haad
1877 1.1.4.1 haad error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
1878 1.1.4.1 haad if (error) {
1879 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1880 1.1.4.1 haad ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
1881 1.1.4.1 haad "returned %d\n", wl->wl_devvp->v_rdev, error));
1882 1.1.4.1 haad }
1883 1.1.4.1 haad
1884 1.1.4.1 haad /*
1885 1.1.4.1 haad * If the generation number was zero, write it out a second time.
1886 1.1.4.1 haad 	 * This handles initialization and generation number rollover.
1887 1.1.4.1 haad */
1888 1.1.4.1 haad if (wc->wc_generation++ == 0) {
1889 1.1.4.1 haad error = wapbl_write_commit(wl, head, tail);
1890 1.1.4.1 haad /*
1891 1.1.4.1 haad 		 * This panic should be removable once we do the zeroing
1892 1.1.4.1 haad 		 * mentioned above and are certain to roll the generation
1893 1.1.4.1 haad 		 * number back on failure.
1894 1.1.4.1 haad */
1895 1.1.4.1 haad if (error)
1896 1.1.4.1 haad panic("wapbl_write_commit: error writing duplicate "
1897 1.1.4.1 haad "log header: %d\n", error);
1898 1.1.4.1 haad }
1899 1.1.4.1 haad return 0;
1900 1.1.4.1 haad }
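
/*
 * Illustrative sketch, not compiled: the commit header ping-pongs
 * between two adjacent log blocks, slot = generation % 2, and replay
 * later trusts whichever slot carries the larger generation number
 * (see wapbl_replay_start).  A toy model of that selection; the
 * "slot" structure is invented for the example.
 */
#if 0
#include <inttypes.h>
#include <stdio.h>

struct slot {
	uint32_t generation;
	/* head, tail, timestamp, ... would live here */
};

/* Which of the two header blocks does a commit with this generation use? */
static int
commit_slot(uint32_t generation)
{
	return generation % 2;
}

/* Which header block does replay believe? */
static const struct slot *
newest(const struct slot *s0, const struct slot *s1)
{
	return (s1->generation > s0->generation) ? s1 : s0;
}

int
main(void)
{
	struct slot s[2] = { { .generation = 4 }, { .generation = 5 } };

	printf("next commit goes to slot %d\n", commit_slot(6));
	printf("replay trusts generation %" PRIu32 "\n",
	    newest(&s[0], &s[1])->generation);
	return 0;
}
#endif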
1901 1.1.4.1 haad
1902 1.1.4.1 haad /* Write the pending buffer blocks to the log; returns the new log offset via offp */
1903 1.1.4.1 haad static int
1904 1.1.4.1 haad wapbl_write_blocks(struct wapbl *wl, off_t *offp)
1905 1.1.4.1 haad {
1906 1.1.4.1 haad struct wapbl_wc_blocklist *wc =
1907 1.1.4.1 haad (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1908 1.1.4.1 haad int blocklen = 1<<wl->wl_log_dev_bshift;
1909 1.1.4.1 haad int bph;
1910 1.1.4.1 haad struct buf *bp;
1911 1.1.4.1 haad off_t off = *offp;
1912 1.1.4.1 haad int error;
1913 1.1.4.1 haad
1914 1.1.4.1 haad KASSERT(rw_write_held(&wl->wl_rwlock));
1915 1.1.4.1 haad
1916 1.1.4.1 haad bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1917 1.1.4.1 haad sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1918 1.1.4.1 haad
1919 1.1.4.1 haad bp = LIST_FIRST(&wl->wl_bufs);
1920 1.1.4.1 haad
1921 1.1.4.1 haad while (bp) {
1922 1.1.4.1 haad int cnt;
1923 1.1.4.1 haad struct buf *obp = bp;
1924 1.1.4.1 haad
1925 1.1.4.1 haad KASSERT(bp->b_flags & B_LOCKED);
1926 1.1.4.1 haad
1927 1.1.4.1 haad wc->wc_type = WAPBL_WC_BLOCKS;
1928 1.1.4.1 haad wc->wc_len = blocklen;
1929 1.1.4.1 haad wc->wc_blkcount = 0;
1930 1.1.4.1 haad while (bp && (wc->wc_blkcount < bph)) {
1931 1.1.4.1 haad /*
1932 1.1.4.1 haad * Make sure all the physical block numbers are up to
1933 1.1.4.1 haad * date. If this is not always true on a given
1934 1.1.4.1 haad * filesystem, then VOP_BMAP must be called. We
1935 1.1.4.1 haad * could call VOP_BMAP here, or else in the filesystem
1936 1.1.4.1 haad * specific flush callback, although neither of those
1937 1.1.4.1 haad * solutions allow us to take the vnode lock. If a
1938 1.1.4.1 haad * filesystem requires that we must take the vnode lock
1939 1.1.4.1 haad * to call VOP_BMAP, then we can probably do it in
1940 1.1.4.1 haad * bwrite when the vnode lock should already be held
1941 1.1.4.1 haad * by the invoking code.
1942 1.1.4.1 haad */
1943 1.1.4.1 haad KASSERT((bp->b_vp->v_type == VBLK) ||
1944 1.1.4.1 haad (bp->b_blkno != bp->b_lblkno));
1945 1.1.4.1 haad KASSERT(bp->b_blkno > 0);
1946 1.1.4.1 haad
1947 1.1.4.1 haad wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
1948 1.1.4.1 haad wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
1949 1.1.4.1 haad wc->wc_len += bp->b_bcount;
1950 1.1.4.1 haad wc->wc_blkcount++;
1951 1.1.4.1 haad bp = LIST_NEXT(bp, b_wapbllist);
1952 1.1.4.1 haad }
1953 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_WRITE,
1954 1.1.4.1 haad ("wapbl_write_blocks: len = %u off = %"PRIdMAX"\n",
1955 1.1.4.1 haad wc->wc_len, (intmax_t)off));
1956 1.1.4.1 haad
1957 1.1.4.1 haad error = wapbl_circ_write(wl, wc, blocklen, &off);
1958 1.1.4.1 haad if (error)
1959 1.1.4.1 haad return error;
1960 1.1.4.1 haad bp = obp;
1961 1.1.4.1 haad cnt = 0;
1962 1.1.4.1 haad while (bp && (cnt++ < bph)) {
1963 1.1.4.1 haad error = wapbl_circ_write(wl, bp->b_data,
1964 1.1.4.1 haad bp->b_bcount, &off);
1965 1.1.4.1 haad if (error)
1966 1.1.4.1 haad return error;
1967 1.1.4.1 haad bp = LIST_NEXT(bp, b_wapbllist);
1968 1.1.4.1 haad }
1969 1.1.4.1 haad }
1970 1.1.4.1 haad *offp = off;
1971 1.1.4.1 haad return 0;
1972 1.1.4.1 haad }
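
/*
 * Illustrative sketch, not compiled: wapbl_write_blocks() above makes
 * two passes over each group of at most bph buffers -- one to fill in
 * the blocklist header describing them, then (rewinding to obp) one to
 * write their payloads right behind that header.  The same two-pass
 * grouping over a plain array, with the writes reduced to printf.
 */
#if 0
#include <stdio.h>

struct bufdesc {
	long long daddr;	/* physical block number */
	int len;		/* payload length */
};

static void
emit_groups(const struct bufdesc *bufs, int nbufs, int bph)
{
	int i = 0;

	while (i < nbufs) {
		int first = i, n = 0;

		/* pass 1: describe up to bph buffers in one header */
		while (i < nbufs && n < bph) {
			printf("header entry: blk %lld len %d\n",
			    bufs[i].daddr, bufs[i].len);
			n++;
			i++;
		}
		/* pass 2: emit the payloads of that same group */
		for (int j = first; j < first + n; j++)
			printf("payload for blk %lld\n", bufs[j].daddr);
	}
}
#endif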
1973 1.1.4.1 haad
1974 1.1.4.1 haad static int
1975 1.1.4.1 haad wapbl_write_revocations(struct wapbl *wl, off_t *offp)
1976 1.1.4.1 haad {
1977 1.1.4.1 haad struct wapbl_wc_blocklist *wc =
1978 1.1.4.1 haad (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
1979 1.1.4.1 haad int i;
1980 1.1.4.1 haad int blocklen = 1<<wl->wl_log_dev_bshift;
1981 1.1.4.1 haad int bph;
1982 1.1.4.1 haad off_t off = *offp;
1983 1.1.4.1 haad int error;
1984 1.1.4.1 haad
1985 1.1.4.1 haad if (wl->wl_dealloccnt == 0)
1986 1.1.4.1 haad return 0;
1987 1.1.4.1 haad
1988 1.1.4.1 haad bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1989 1.1.4.1 haad sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1990 1.1.4.1 haad
1991 1.1.4.1 haad i = 0;
1992 1.1.4.1 haad while (i < wl->wl_dealloccnt) {
1993 1.1.4.1 haad wc->wc_type = WAPBL_WC_REVOCATIONS;
1994 1.1.4.1 haad wc->wc_len = blocklen;
1995 1.1.4.1 haad wc->wc_blkcount = 0;
1996 1.1.4.1 haad while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
1997 1.1.4.1 haad wc->wc_blocks[wc->wc_blkcount].wc_daddr =
1998 1.1.4.1 haad wl->wl_deallocblks[i];
1999 1.1.4.1 haad wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2000 1.1.4.1 haad wl->wl_dealloclens[i];
2001 1.1.4.1 haad wc->wc_blkcount++;
2002 1.1.4.1 haad i++;
2003 1.1.4.1 haad }
2004 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2005 1.1.4.1 haad ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2006 1.1.4.1 haad wc->wc_len, (intmax_t)off));
2007 1.1.4.1 haad error = wapbl_circ_write(wl, wc, blocklen, &off);
2008 1.1.4.1 haad if (error)
2009 1.1.4.1 haad return error;
2010 1.1.4.1 haad }
2011 1.1.4.1 haad *offp = off;
2012 1.1.4.1 haad return 0;
2013 1.1.4.1 haad }
2014 1.1.4.1 haad
2015 1.1.4.1 haad static int
2016 1.1.4.1 haad wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2017 1.1.4.1 haad {
2018 1.1.4.1 haad struct wapbl_wc_inodelist *wc =
2019 1.1.4.1 haad (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2020 1.1.4.1 haad int i;
2021 1.1.4.1 haad int blocklen = 1<<wl->wl_log_dev_bshift;
2022 1.1.4.1 haad off_t off = *offp;
2023 1.1.4.1 haad int error;
2024 1.1.4.1 haad
2025 1.1.4.1 haad struct wapbl_ino_head *wih;
2026 1.1.4.1 haad struct wapbl_ino *wi;
2027 1.1.4.1 haad int iph;
2028 1.1.4.1 haad
2029 1.1.4.1 haad iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2030 1.1.4.1 haad sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2031 1.1.4.1 haad
2032 1.1.4.1 haad i = 0;
2033 1.1.4.1 haad wih = &wl->wl_inohash[0];
2034 1.1.4.1 haad wi = 0;
2035 1.1.4.1 haad do {
2036 1.1.4.1 haad wc->wc_type = WAPBL_WC_INODES;
2037 1.1.4.1 haad wc->wc_len = blocklen;
2038 1.1.4.1 haad wc->wc_inocnt = 0;
2039 1.1.4.1 haad wc->wc_clear = (i == 0);
2040 1.1.4.1 haad while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
2041 1.1.4.1 haad while (!wi) {
2042 1.1.4.1 haad KASSERT((wih - &wl->wl_inohash[0])
2043 1.1.4.1 haad <= wl->wl_inohashmask);
2044 1.1.4.1 haad wi = LIST_FIRST(wih++);
2045 1.1.4.1 haad }
2046 1.1.4.1 haad wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2047 1.1.4.1 haad wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2048 1.1.4.1 haad wc->wc_inocnt++;
2049 1.1.4.1 haad i++;
2050 1.1.4.1 haad wi = LIST_NEXT(wi, wi_hash);
2051 1.1.4.1 haad }
2052 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2053 1.1.4.1 haad ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2054 1.1.4.1 haad wc->wc_len, (intmax_t)off));
2055 1.1.4.1 haad error = wapbl_circ_write(wl, wc, blocklen, &off);
2056 1.1.4.1 haad if (error)
2057 1.1.4.1 haad return error;
2058 1.1.4.1 haad } while (i < wl->wl_inohashcnt);
2059 1.1.4.1 haad
2060 1.1.4.1 haad *offp = off;
2061 1.1.4.1 haad return 0;
2062 1.1.4.1 haad }
2063 1.1.4.1 haad
2064 1.1.4.1 haad #endif /* _KERNEL */
2065 1.1.4.1 haad
2066 1.1.4.1 haad /****************************************************************/
2067 1.1.4.1 haad
2068 1.1.4.1 haad #ifdef _KERNEL
2069 1.1.4.1 haad static struct pool wapbl_blk_pool;
2070 1.1.4.1 haad static int wapbl_blk_pool_refcount;
2071 1.1.4.1 haad #endif
2072 1.1.4.1 haad struct wapbl_blk {
2073 1.1.4.1 haad LIST_ENTRY(wapbl_blk) wb_hash;
2074 1.1.4.1 haad daddr_t wb_blk;
2075 1.1.4.1 haad off_t wb_off; /* Offset of this block in the log */
2076 1.1.4.1 haad };
2077 1.1.4.1 haad #define WAPBL_BLKPOOL_MIN 83
2078 1.1.4.1 haad
2079 1.1.4.1 haad static void
2080 1.1.4.1 haad wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2081 1.1.4.1 haad {
2082 1.1.4.1 haad if (size < WAPBL_BLKPOOL_MIN)
2083 1.1.4.1 haad size = WAPBL_BLKPOOL_MIN;
2084 1.1.4.1 haad KASSERT(wr->wr_blkhash == 0);
2085 1.1.4.1 haad #ifdef _KERNEL
2086 1.1.4.1 haad wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2087 1.1.4.1 haad if (atomic_inc_uint_nv(&wapbl_blk_pool_refcount) == 1) {
2088 1.1.4.1 haad pool_init(&wapbl_blk_pool, sizeof(struct wapbl_blk), 0, 0, 0,
2089 1.1.4.1 haad "wapblblkpl", &pool_allocator_nointr, IPL_NONE);
2090 1.1.4.1 haad }
2091 1.1.4.1 haad #else /* ! _KERNEL */
2092 1.1.4.1 haad /* Manually implement hashinit */
2093 1.1.4.1 haad {
2094 1.1.4.1 haad int i;
2095 1.1.4.1 haad unsigned long hashsize;
2096 1.1.4.1 haad for (hashsize = 1; hashsize < size; hashsize <<= 1)
2097 1.1.4.1 haad continue;
2098 1.1.4.1 haad wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
2099 1.1.4.1 haad 		for (i = 0; i < hashsize; i++)
2100 1.1.4.1 haad LIST_INIT(&wr->wr_blkhash[i]);
2101 1.1.4.1 haad wr->wr_blkhashmask = hashsize - 1;
2102 1.1.4.1 haad }
2103 1.1.4.1 haad #endif /* ! _KERNEL */
2104 1.1.4.1 haad }
2105 1.1.4.1 haad
2106 1.1.4.1 haad static void
2107 1.1.4.1 haad wapbl_blkhash_free(struct wapbl_replay *wr)
2108 1.1.4.1 haad {
2109 1.1.4.1 haad KASSERT(wr->wr_blkhashcnt == 0);
2110 1.1.4.1 haad #ifdef _KERNEL
2111 1.1.4.1 haad hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2112 1.1.4.1 haad if (atomic_dec_uint_nv(&wapbl_blk_pool_refcount) == 0) {
2113 1.1.4.1 haad pool_destroy(&wapbl_blk_pool);
2114 1.1.4.1 haad }
2115 1.1.4.1 haad #else /* ! _KERNEL */
2116 1.1.4.1 haad wapbl_free(wr->wr_blkhash);
2117 1.1.4.1 haad #endif /* ! _KERNEL */
2118 1.1.4.1 haad }
2119 1.1.4.1 haad
2120 1.1.4.1 haad static struct wapbl_blk *
2121 1.1.4.1 haad wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2122 1.1.4.1 haad {
2123 1.1.4.1 haad struct wapbl_blk_head *wbh;
2124 1.1.4.1 haad struct wapbl_blk *wb;
2125 1.1.4.1 haad wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2126 1.1.4.1 haad LIST_FOREACH(wb, wbh, wb_hash) {
2127 1.1.4.1 haad if (blk == wb->wb_blk)
2128 1.1.4.1 haad return wb;
2129 1.1.4.1 haad }
2130 1.1.4.1 haad return 0;
2131 1.1.4.1 haad }
2132 1.1.4.1 haad
2133 1.1.4.1 haad static void
2134 1.1.4.1 haad wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2135 1.1.4.1 haad {
2136 1.1.4.1 haad struct wapbl_blk_head *wbh;
2137 1.1.4.1 haad struct wapbl_blk *wb;
2138 1.1.4.1 haad wb = wapbl_blkhash_get(wr, blk);
2139 1.1.4.1 haad if (wb) {
2140 1.1.4.1 haad KASSERT(wb->wb_blk == blk);
2141 1.1.4.1 haad wb->wb_off = off;
2142 1.1.4.1 haad } else {
2143 1.1.4.1 haad #ifdef _KERNEL
2144 1.1.4.1 haad wb = pool_get(&wapbl_blk_pool, PR_WAITOK);
2145 1.1.4.1 haad #else /* ! _KERNEL */
2146 1.1.4.1 haad wb = wapbl_malloc(sizeof(*wb));
2147 1.1.4.1 haad #endif /* ! _KERNEL */
2148 1.1.4.1 haad wb->wb_blk = blk;
2149 1.1.4.1 haad wb->wb_off = off;
2150 1.1.4.1 haad wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2151 1.1.4.1 haad LIST_INSERT_HEAD(wbh, wb, wb_hash);
2152 1.1.4.1 haad wr->wr_blkhashcnt++;
2153 1.1.4.1 haad }
2154 1.1.4.1 haad }
2155 1.1.4.1 haad
2156 1.1.4.1 haad static void
2157 1.1.4.1 haad wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2158 1.1.4.1 haad {
2159 1.1.4.1 haad struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2160 1.1.4.1 haad if (wb) {
2161 1.1.4.1 haad KASSERT(wr->wr_blkhashcnt > 0);
2162 1.1.4.1 haad wr->wr_blkhashcnt--;
2163 1.1.4.1 haad LIST_REMOVE(wb, wb_hash);
2164 1.1.4.1 haad #ifdef _KERNEL
2165 1.1.4.1 haad pool_put(&wapbl_blk_pool, wb);
2166 1.1.4.1 haad #else /* ! _KERNEL */
2167 1.1.4.1 haad wapbl_free(wb);
2168 1.1.4.1 haad #endif /* ! _KERNEL */
2169 1.1.4.1 haad }
2170 1.1.4.1 haad }
2171 1.1.4.1 haad
2172 1.1.4.1 haad static void
2173 1.1.4.1 haad wapbl_blkhash_clear(struct wapbl_replay *wr)
2174 1.1.4.1 haad {
2175 1.1.4.1 haad int i;
2176 1.1.4.1 haad for (i = 0; i <= wr->wr_blkhashmask; i++) {
2177 1.1.4.1 haad struct wapbl_blk *wb;
2178 1.1.4.1 haad
2179 1.1.4.1 haad while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2180 1.1.4.1 haad KASSERT(wr->wr_blkhashcnt > 0);
2181 1.1.4.1 haad wr->wr_blkhashcnt--;
2182 1.1.4.1 haad LIST_REMOVE(wb, wb_hash);
2183 1.1.4.1 haad #ifdef _KERNEL
2184 1.1.4.1 haad pool_put(&wapbl_blk_pool, wb);
2185 1.1.4.1 haad #else /* ! _KERNEL */
2186 1.1.4.1 haad wapbl_free(wb);
2187 1.1.4.1 haad #endif /* ! _KERNEL */
2188 1.1.4.1 haad }
2189 1.1.4.1 haad }
2190 1.1.4.1 haad KASSERT(wr->wr_blkhashcnt == 0);
2191 1.1.4.1 haad }
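
/*
 * Illustrative sketch, not compiled: the replay block hash above maps a
 * filesystem block number to the log offset of its newest copy -- a
 * later WAPBL_WC_BLOCKS entry simply overwrites wb_off, and a
 * revocation removes the block so it is never replayed.  A minimal
 * user-space version with the same power-of-two mask indexing; the
 * table size here is arbitrary.
 */
#if 0
#include <sys/queue.h>
#include <stdlib.h>

struct blk {
	LIST_ENTRY(blk) b_hash;
	long long b_blkno;	/* filesystem block number */
	long long b_logoff;	/* offset of the newest copy in the log */
};
LIST_HEAD(blkhead, blk);

#define NBUCKETS	64		/* must be a power of two */
#define HASHMASK	(NBUCKETS - 1)
static struct blkhead buckets[NBUCKETS];	/* zero-init == LIST_INIT */

static struct blk *
blk_get(long long blkno)
{
	struct blk *b;

	LIST_FOREACH(b, &buckets[blkno & HASHMASK], b_hash)
		if (b->b_blkno == blkno)
			return b;
	return NULL;
}

/* Insert or update: the newest log offset always wins. */
static void
blk_ins(long long blkno, long long logoff)
{
	struct blk *b = blk_get(blkno);

	if (b == NULL) {
		b = calloc(1, sizeof(*b));
		b->b_blkno = blkno;
		LIST_INSERT_HEAD(&buckets[blkno & HASHMASK], b, b_hash);
	}
	b->b_logoff = logoff;
}

/* Revocation: forget the block. */
static void
blk_rem(long long blkno)
{
	struct blk *b = blk_get(blkno);

	if (b != NULL) {
		LIST_REMOVE(b, b_hash);
		free(b);
	}
}
#endif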
2192 1.1.4.1 haad
2193 1.1.4.1 haad /****************************************************************/
2194 1.1.4.1 haad
2195 1.1.4.1 haad static int
2196 1.1.4.1 haad wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2197 1.1.4.1 haad {
2198 1.1.4.1 haad size_t slen;
2199 1.1.4.1 haad struct wapbl_wc_header *wc = &wr->wr_wc_header;
2200 1.1.4.1 haad off_t off = *offp;
2201 1.1.4.1 haad int error;
2202 1.1.4.1 haad
2203 1.1.4.1 haad KASSERT(((len >> wc->wc_log_dev_bshift) <<
2204 1.1.4.1 haad wc->wc_log_dev_bshift) == len);
2205 1.1.4.1 haad if (off < wc->wc_circ_off)
2206 1.1.4.1 haad off = wc->wc_circ_off;
2207 1.1.4.1 haad slen = wc->wc_circ_off + wc->wc_circ_size - off;
2208 1.1.4.1 haad if (slen < len) {
2209 1.1.4.1 haad error = wapbl_read(data, slen, wr->wr_devvp,
2210 1.1.4.1 haad wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2211 1.1.4.1 haad if (error)
2212 1.1.4.1 haad return error;
2213 1.1.4.1 haad data = (uint8_t *)data + slen;
2214 1.1.4.1 haad len -= slen;
2215 1.1.4.1 haad off = wc->wc_circ_off;
2216 1.1.4.1 haad }
2217 1.1.4.1 haad error = wapbl_read(data, len, wr->wr_devvp,
2218 1.1.4.1 haad wr->wr_logpbn + (off >> wc->wc_log_dev_bshift));
2219 1.1.4.1 haad if (error)
2220 1.1.4.1 haad return error;
2221 1.1.4.1 haad off += len;
2222 1.1.4.1 haad if (off >= wc->wc_circ_off + wc->wc_circ_size)
2223 1.1.4.1 haad off = wc->wc_circ_off;
2224 1.1.4.1 haad *offp = off;
2225 1.1.4.1 haad return 0;
2226 1.1.4.1 haad }
2227 1.1.4.1 haad
2228 1.1.4.1 haad static void
2229 1.1.4.1 haad wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2230 1.1.4.1 haad {
2231 1.1.4.1 haad size_t slen;
2232 1.1.4.1 haad struct wapbl_wc_header *wc = &wr->wr_wc_header;
2233 1.1.4.1 haad off_t off = *offp;
2234 1.1.4.1 haad
2235 1.1.4.1 haad KASSERT(((len >> wc->wc_log_dev_bshift) <<
2236 1.1.4.1 haad wc->wc_log_dev_bshift) == len);
2237 1.1.4.1 haad
2238 1.1.4.1 haad if (off < wc->wc_circ_off)
2239 1.1.4.1 haad off = wc->wc_circ_off;
2240 1.1.4.1 haad slen = wc->wc_circ_off + wc->wc_circ_size - off;
2241 1.1.4.1 haad if (slen < len) {
2242 1.1.4.1 haad len -= slen;
2243 1.1.4.1 haad off = wc->wc_circ_off;
2244 1.1.4.1 haad }
2245 1.1.4.1 haad off += len;
2246 1.1.4.1 haad if (off >= wc->wc_circ_off + wc->wc_circ_size)
2247 1.1.4.1 haad off = wc->wc_circ_off;
2248 1.1.4.1 haad *offp = off;
2249 1.1.4.1 haad }
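
/*
 * Illustrative sketch, not compiled: log payload lives in the window
 * [circ_off, circ_off + circ_size); anything below circ_off (the
 * commit headers) is skipped, and offsets that run off the end wrap
 * back to circ_off, exactly as in wapbl_circ_advance() above.  A
 * standalone copy of that arithmetic with a small demo; the window
 * numbers are arbitrary.
 */
#if 0
#include <stdio.h>

static long long
circ_advance(long long off, long long len,
    long long circ_off, long long circ_size)
{
	long long slen;

	if (off < circ_off)
		off = circ_off;
	slen = circ_off + circ_size - off;	/* room before the wrap */
	if (slen < len) {
		len -= slen;
		off = circ_off;
	}
	off += len;
	if (off >= circ_off + circ_size)
		off = circ_off;
	return off;
}

int
main(void)
{
	/* a 4096-byte data window starting at offset 1024 */
	long long off = 1024 + 3584;	/* 512 bytes before the end */

	/* 512 bytes to the end, wrap, then 512 more: prints 1536 */
	printf("%lld\n", circ_advance(off, 1024, 1024, 4096));
	return 0;
}
#endif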
2250 1.1.4.1 haad
2251 1.1.4.1 haad /****************************************************************/
2252 1.1.4.1 haad
2253 1.1.4.1 haad int
2254 1.1.4.1 haad wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2255 1.1.4.1 haad daddr_t off, size_t count, size_t blksize)
2256 1.1.4.1 haad {
2257 1.1.4.1 haad struct wapbl_replay *wr;
2258 1.1.4.1 haad int error;
2259 1.1.4.1 haad struct vnode *devvp;
2260 1.1.4.1 haad daddr_t logpbn;
2261 1.1.4.1 haad uint8_t *scratch;
2262 1.1.4.1 haad struct wapbl_wc_header *wch;
2263 1.1.4.1 haad struct wapbl_wc_header *wch2;
2264 1.1.4.1 haad /* Use this until we read the actual log header */
2265 1.1.4.1 haad int log_dev_bshift = DEV_BSHIFT;
2266 1.1.4.1 haad size_t used;
2267 1.1.4.1 haad
2268 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2269 1.1.4.1 haad ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
2270 1.1.4.1 haad vp, off, count, blksize));
2271 1.1.4.1 haad
2272 1.1.4.1 haad if (off < 0)
2273 1.1.4.1 haad return EINVAL;
2274 1.1.4.1 haad
2275 1.1.4.1 haad if (blksize < DEV_BSIZE)
2276 1.1.4.1 haad return EINVAL;
2277 1.1.4.1 haad if (blksize % DEV_BSIZE)
2278 1.1.4.1 haad return EINVAL;
2279 1.1.4.1 haad
2280 1.1.4.1 haad #ifdef _KERNEL
2281 1.1.4.1 haad #if 0
2282 1.1.4.1 haad /* XXX vp->v_size isn't reliably set for VBLK devices,
2283 1.1.4.1 haad * especially root. However, we might still want to verify
2284 1.1.4.1 haad * that the full load is readable */
2285 1.1.4.1 haad if ((off + count) * blksize > vp->v_size)
2286 1.1.4.1 haad return EINVAL;
2287 1.1.4.1 haad #endif
2288 1.1.4.1 haad
2289 1.1.4.1 haad if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2290 1.1.4.1 haad return error;
2291 1.1.4.1 haad }
2292 1.1.4.1 haad #else /* ! _KERNEL */
2293 1.1.4.1 haad devvp = vp;
2294 1.1.4.1 haad logpbn = off;
2295 1.1.4.1 haad #endif /* ! _KERNEL */
2296 1.1.4.1 haad
2297 1.1.4.1 haad scratch = wapbl_malloc(MAXBSIZE);
2298 1.1.4.1 haad
2299 1.1.4.1 haad error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
2300 1.1.4.1 haad if (error)
2301 1.1.4.1 haad goto errout;
2302 1.1.4.1 haad
2303 1.1.4.1 haad wch = (struct wapbl_wc_header *)scratch;
2304 1.1.4.1 haad wch2 =
2305 1.1.4.1 haad (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
2306 1.1.4.1 haad /* XXX verify checksums and magic numbers */
2307 1.1.4.1 haad if (wch->wc_type != WAPBL_WC_HEADER) {
2308 1.1.4.1 haad printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
2309 1.1.4.1 haad error = EFTYPE;
2310 1.1.4.1 haad goto errout;
2311 1.1.4.1 haad }
2312 1.1.4.1 haad
2313 1.1.4.1 haad if (wch2->wc_generation > wch->wc_generation)
2314 1.1.4.1 haad wch = wch2;
2315 1.1.4.1 haad
2316 1.1.4.1 haad wr = wapbl_calloc(1, sizeof(*wr));
2317 1.1.4.1 haad
2318 1.1.4.1 haad wr->wr_logvp = vp;
2319 1.1.4.1 haad wr->wr_devvp = devvp;
2320 1.1.4.1 haad wr->wr_logpbn = logpbn;
2321 1.1.4.1 haad
2322 1.1.4.1 haad wr->wr_scratch = scratch;
2323 1.1.4.1 haad
2324 1.1.4.1 haad memcpy(&wr->wr_wc_header, wch, sizeof(wr->wr_wc_header));
2325 1.1.4.1 haad
2326 1.1.4.1 haad used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
2327 1.1.4.1 haad
2328 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2329 1.1.4.1 haad ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
2330 1.1.4.1 haad " len=%"PRId64" used=%zu\n",
2331 1.1.4.1 haad wch->wc_head, wch->wc_tail, wch->wc_circ_off,
2332 1.1.4.1 haad wch->wc_circ_size, used));
2333 1.1.4.1 haad
2334 1.1.4.1 haad wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
2335 1.1.4.1 haad error = wapbl_replay_prescan(wr);
2336 1.1.4.1 haad if (error) {
2337 1.1.4.1 haad wapbl_replay_stop(wr);
2338 1.1.4.1 haad wapbl_replay_free(wr);
2339 1.1.4.1 haad return error;
2340 1.1.4.1 haad }
2341 1.1.4.1 haad
2342 1.1.4.1 haad error = wapbl_replay_get_inodes(wr);
2343 1.1.4.1 haad if (error) {
2344 1.1.4.1 haad wapbl_replay_stop(wr);
2345 1.1.4.1 haad wapbl_replay_free(wr);
2346 1.1.4.1 haad return error;
2347 1.1.4.1 haad }
2348 1.1.4.1 haad
2349 1.1.4.1 haad *wrp = wr;
2350 1.1.4.1 haad return 0;
2351 1.1.4.1 haad
2352 1.1.4.1 haad errout:
2353 1.1.4.1 haad wapbl_free(scratch);
2354 1.1.4.1 haad return error;
2355 1.1.4.1 haad }
2356 1.1.4.1 haad
2357 1.1.4.1 haad void
2358 1.1.4.1 haad wapbl_replay_stop(struct wapbl_replay *wr)
2359 1.1.4.1 haad {
2360 1.1.4.1 haad
2361 1.1.4.1 haad WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2362 1.1.4.1 haad
2363 1.1.4.1 haad KDASSERT(wapbl_replay_isopen(wr));
2364 1.1.4.1 haad
2365 1.1.4.1 haad wapbl_free(wr->wr_scratch);
2366 1.1.4.1 haad wr->wr_scratch = 0;
2367 1.1.4.1 haad
2368 1.1.4.1 haad wr->wr_logvp = 0;
2369 1.1.4.1 haad
2370 1.1.4.1 haad wapbl_blkhash_clear(wr);
2371 1.1.4.1 haad wapbl_blkhash_free(wr);
2372 1.1.4.1 haad }
2373 1.1.4.1 haad
2374 1.1.4.1 haad void
2375 1.1.4.1 haad wapbl_replay_free(struct wapbl_replay *wr)
2376 1.1.4.1 haad {
2377 1.1.4.1 haad
2378 1.1.4.1 haad KDASSERT(!wapbl_replay_isopen(wr));
2379 1.1.4.1 haad
2380 1.1.4.1 haad if (wr->wr_inodes)
2381 1.1.4.1 haad wapbl_free(wr->wr_inodes);
2382 1.1.4.1 haad wapbl_free(wr);
2383 1.1.4.1 haad }
2384 1.1.4.1 haad
2385 1.1.4.1 haad int
2386 1.1.4.1 haad wapbl_replay_isopen1(struct wapbl_replay *wr)
2387 1.1.4.1 haad {
2388 1.1.4.1 haad
2389 1.1.4.1 haad return wapbl_replay_isopen(wr);
2390 1.1.4.1 haad }
2391 1.1.4.1 haad
2392 1.1.4.1 haad static int
2393 1.1.4.1 haad wapbl_replay_prescan(struct wapbl_replay *wr)
2394 1.1.4.1 haad {
2395 1.1.4.1 haad off_t off;
2396 1.1.4.1 haad struct wapbl_wc_header *wch = &wr->wr_wc_header;
2397 1.1.4.1 haad int error;
2398 1.1.4.1 haad
2399 1.1.4.1 haad int logblklen = 1<<wch->wc_log_dev_bshift;
2400 1.1.4.1 haad int fsblklen = 1<<wch->wc_fs_dev_bshift;
2401 1.1.4.1 haad
2402 1.1.4.1 haad wapbl_blkhash_clear(wr);
2403 1.1.4.1 haad
2404 1.1.4.1 haad off = wch->wc_tail;
2405 1.1.4.1 haad while (off != wch->wc_head) {
2406 1.1.4.1 haad struct wapbl_wc_null *wcn;
2407 1.1.4.1 haad off_t saveoff = off;
2408 1.1.4.1 haad error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2409 1.1.4.1 haad if (error)
2410 1.1.4.1 haad goto errout;
2411 1.1.4.1 haad wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2412 1.1.4.1 haad switch (wcn->wc_type) {
2413 1.1.4.1 haad case WAPBL_WC_BLOCKS:
2414 1.1.4.1 haad {
2415 1.1.4.1 haad struct wapbl_wc_blocklist *wc =
2416 1.1.4.1 haad (struct wapbl_wc_blocklist *)wr->wr_scratch;
2417 1.1.4.1 haad int i;
2418 1.1.4.1 haad for (i = 0; i < wc->wc_blkcount; i++) {
2419 1.1.4.1 haad int j, n;
2420 1.1.4.1 haad /*
2421 1.1.4.1 haad * Enter each physical block into the
2422 1.1.4.1 haad * hashtable independently
2423 1.1.4.1 haad */
2424 1.1.4.1 haad n = wc->wc_blocks[i].wc_dlen >>
2425 1.1.4.1 haad wch->wc_fs_dev_bshift;
2426 1.1.4.1 haad for (j = 0; j < n; j++) {
2427 1.1.4.1 haad wapbl_blkhash_ins(wr,
2428 1.1.4.1 haad wc->wc_blocks[i].wc_daddr + j,
2429 1.1.4.1 haad off);
2430 1.1.4.1 haad wapbl_circ_advance(wr,
2431 1.1.4.1 haad fsblklen, &off);
2432 1.1.4.1 haad }
2433 1.1.4.1 haad }
2434 1.1.4.1 haad }
2435 1.1.4.1 haad break;
2436 1.1.4.1 haad
2437 1.1.4.1 haad case WAPBL_WC_REVOCATIONS:
2438 1.1.4.1 haad {
2439 1.1.4.1 haad struct wapbl_wc_blocklist *wc =
2440 1.1.4.1 haad (struct wapbl_wc_blocklist *)wr->wr_scratch;
2441 1.1.4.1 haad int i;
2442 1.1.4.1 haad for (i = 0; i < wc->wc_blkcount; i++) {
2443 1.1.4.1 haad int j, n;
2444 1.1.4.1 haad /*
2445 1.1.4.1 haad * Remove any blocks found from the
2446 1.1.4.1 haad * hashtable
2447 1.1.4.1 haad */
2448 1.1.4.1 haad n = wc->wc_blocks[i].wc_dlen >>
2449 1.1.4.1 haad wch->wc_fs_dev_bshift;
2450 1.1.4.1 haad for (j = 0; j < n; j++) {
2451 1.1.4.1 haad wapbl_blkhash_rem(wr,
2452 1.1.4.1 haad wc->wc_blocks[i].wc_daddr + j);
2453 1.1.4.1 haad }
2454 1.1.4.1 haad }
2455 1.1.4.1 haad }
2456 1.1.4.1 haad break;
2457 1.1.4.1 haad
2458 1.1.4.1 haad case WAPBL_WC_INODES:
2459 1.1.4.1 haad {
2460 1.1.4.1 haad struct wapbl_wc_inodelist *wc =
2461 1.1.4.1 haad (struct wapbl_wc_inodelist *)wr->wr_scratch;
2462 1.1.4.1 haad /*
2463 1.1.4.1 haad * Keep track of where we found this so we
2464 1.1.4.1 haad * can use it later
2465 1.1.4.1 haad */
2466 1.1.4.1 haad if (wc->wc_clear) {
2467 1.1.4.1 haad wr->wr_inodestail = saveoff;
2468 1.1.4.1 haad wr->wr_inodescnt = 0;
2469 1.1.4.1 haad }
2470 1.1.4.1 haad if (wr->wr_inodestail)
2471 1.1.4.1 haad wr->wr_inodeshead = off;
2472 1.1.4.1 haad wr->wr_inodescnt += wc->wc_inocnt;
2473 1.1.4.1 haad }
2474 1.1.4.1 haad break;
2475 1.1.4.1 haad default:
2476 1.1.4.1 haad printf("Unrecognized wapbl type: 0x%08x\n",
2477 1.1.4.1 haad wcn->wc_type);
2478 1.1.4.1 haad error = EFTYPE;
2479 1.1.4.1 haad goto errout;
2480 1.1.4.1 haad }
2481 1.1.4.1 haad wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2482 1.1.4.1 haad if (off != saveoff) {
2483 1.1.4.1 haad printf("wapbl_replay: corrupted records\n");
2484 1.1.4.1 haad error = EFTYPE;
2485 1.1.4.1 haad goto errout;
2486 1.1.4.1 haad }
2487 1.1.4.1 haad }
2488 1.1.4.1 haad return 0;
2489 1.1.4.1 haad
2490 1.1.4.1 haad errout:
2491 1.1.4.1 haad wapbl_blkhash_clear(wr);
2492 1.1.4.1 haad return error;
2493 1.1.4.1 haad }
2494 1.1.4.1 haad
2495 1.1.4.1 haad static int
2496 1.1.4.1 haad wapbl_replay_get_inodes(struct wapbl_replay *wr)
2497 1.1.4.1 haad {
2498 1.1.4.1 haad off_t off;
2499 1.1.4.1 haad struct wapbl_wc_header *wch = &wr->wr_wc_header;
2500 1.1.4.1 haad int logblklen = 1<<wch->wc_log_dev_bshift;
2501 1.1.4.1 haad 	int cnt = 0;
2502 1.1.4.1 haad
2503 1.1.4.1 haad KDASSERT(wapbl_replay_isopen(wr));
2504 1.1.4.1 haad
2505 1.1.4.1 haad if (wr->wr_inodescnt == 0)
2506 1.1.4.1 haad return 0;
2507 1.1.4.1 haad
2508 1.1.4.1 haad KASSERT(!wr->wr_inodes);
2509 1.1.4.1 haad
2510 1.1.4.1 haad wr->wr_inodes = wapbl_malloc(wr->wr_inodescnt*sizeof(wr->wr_inodes[0]));
2511 1.1.4.1 haad
2512 1.1.4.1 haad off = wr->wr_inodestail;
2513 1.1.4.1 haad
2514 1.1.4.1 haad while (off != wr->wr_inodeshead) {
2515 1.1.4.1 haad struct wapbl_wc_null *wcn;
2516 1.1.4.1 haad int error;
2517 1.1.4.1 haad off_t saveoff = off;
2518 1.1.4.1 haad error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2519 1.1.4.1 haad if (error) {
2520 1.1.4.1 haad wapbl_free(wr->wr_inodes);
2521 1.1.4.1 haad wr->wr_inodes = 0;
2522 1.1.4.1 haad return error;
2523 1.1.4.1 haad }
2524 1.1.4.1 haad wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2525 1.1.4.1 haad switch (wcn->wc_type) {
2526 1.1.4.1 haad case WAPBL_WC_BLOCKS:
2527 1.1.4.1 haad case WAPBL_WC_REVOCATIONS:
2528 1.1.4.1 haad break;
2529 1.1.4.1 haad case WAPBL_WC_INODES:
2530 1.1.4.1 haad {
2531 1.1.4.1 haad struct wapbl_wc_inodelist *wc =
2532 1.1.4.1 haad (struct wapbl_wc_inodelist *)wr->wr_scratch;
2533 1.1.4.1 haad 			/*
2534 1.1.4.1 haad 			 * Accumulate the inode entries; a cleared
2535 1.1.4.1 haad 			 * list restarts the accumulation.
2536 1.1.4.1 haad 			 */
2537 1.1.4.1 haad if (wc->wc_clear) {
2538 1.1.4.1 haad cnt = 0;
2539 1.1.4.1 haad }
2540 1.1.4.1 haad /* This memcpy assumes that wr_inodes is
2541 1.1.4.1 haad * laid out the same as wc_inodes. */
2542 1.1.4.1 haad memcpy(&wr->wr_inodes[cnt], wc->wc_inodes,
2543 1.1.4.1 haad wc->wc_inocnt*sizeof(wc->wc_inodes[0]));
2544 1.1.4.1 haad cnt += wc->wc_inocnt;
2545 1.1.4.1 haad }
2546 1.1.4.1 haad break;
2547 1.1.4.1 haad default:
2548 1.1.4.1 haad KASSERT(0);
2549 1.1.4.1 haad }
2550 1.1.4.1 haad off = saveoff;
2551 1.1.4.1 haad wapbl_circ_advance(wr, wcn->wc_len, &off);
2552 1.1.4.1 haad }
2553 1.1.4.1 haad KASSERT(cnt == wr->wr_inodescnt);
2554 1.1.4.1 haad return 0;
2555 1.1.4.1 haad }
2556 1.1.4.1 haad
2557 1.1.4.1 haad #ifdef DEBUG
2558 1.1.4.1 haad int
2559 1.1.4.1 haad wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
2560 1.1.4.1 haad {
2561 1.1.4.1 haad off_t off;
2562 1.1.4.1 haad struct wapbl_wc_header *wch = &wr->wr_wc_header;
2563 1.1.4.1 haad int mismatchcnt = 0;
2564 1.1.4.1 haad int logblklen = 1<<wch->wc_log_dev_bshift;
2565 1.1.4.1 haad int fsblklen = 1<<wch->wc_fs_dev_bshift;
2566 1.1.4.1 haad void *scratch1 = wapbl_malloc(MAXBSIZE);
2567 1.1.4.1 haad void *scratch2 = wapbl_malloc(MAXBSIZE);
2568 1.1.4.1 haad int error = 0;
2569 1.1.4.1 haad
2570 1.1.4.1 haad KDASSERT(wapbl_replay_isopen(wr));
2571 1.1.4.1 haad
2572 1.1.4.1 haad off = wch->wc_tail;
2573 1.1.4.1 haad while (off != wch->wc_head) {
2574 1.1.4.1 haad struct wapbl_wc_null *wcn;
2575 1.1.4.1 haad #ifdef DEBUG
2576 1.1.4.1 haad off_t saveoff = off;
2577 1.1.4.1 haad #endif
2578 1.1.4.1 haad error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2579 1.1.4.1 haad if (error)
2580 1.1.4.1 haad goto out;
2581 1.1.4.1 haad wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2582 1.1.4.1 haad switch (wcn->wc_type) {
2583 1.1.4.1 haad case WAPBL_WC_BLOCKS:
2584 1.1.4.1 haad {
2585 1.1.4.1 haad struct wapbl_wc_blocklist *wc =
2586 1.1.4.1 haad (struct wapbl_wc_blocklist *)wr->wr_scratch;
2587 1.1.4.1 haad int i;
2588 1.1.4.1 haad for (i = 0; i < wc->wc_blkcount; i++) {
2589 1.1.4.1 haad int foundcnt = 0;
2590 1.1.4.1 haad int dirtycnt = 0;
2591 1.1.4.1 haad int j, n;
2592 1.1.4.1 haad /*
2593 1.1.4.1 haad 				 * Check each physical block against the
2594 1.1.4.1 haad * hashtable independently
2595 1.1.4.1 haad */
2596 1.1.4.1 haad n = wc->wc_blocks[i].wc_dlen >>
2597 1.1.4.1 haad wch->wc_fs_dev_bshift;
2598 1.1.4.1 haad for (j = 0; j < n; j++) {
2599 1.1.4.1 haad struct wapbl_blk *wb =
2600 1.1.4.1 haad wapbl_blkhash_get(wr,
2601 1.1.4.1 haad wc->wc_blocks[i].wc_daddr + j);
2602 1.1.4.1 haad if (wb && (wb->wb_off == off)) {
2603 1.1.4.1 haad foundcnt++;
2604 1.1.4.1 haad error =
2605 1.1.4.1 haad wapbl_circ_read(wr,
2606 1.1.4.1 haad scratch1, fsblklen,
2607 1.1.4.1 haad &off);
2608 1.1.4.1 haad if (error)
2609 1.1.4.1 haad goto out;
2610 1.1.4.1 haad error =
2611 1.1.4.1 haad wapbl_read(scratch2,
2612 1.1.4.1 haad fsblklen, fsdevvp,
2613 1.1.4.1 haad wb->wb_blk);
2614 1.1.4.1 haad if (error)
2615 1.1.4.1 haad goto out;
2616 1.1.4.1 haad if (memcmp(scratch1,
2617 1.1.4.1 haad scratch2,
2618 1.1.4.1 haad fsblklen)) {
2619 1.1.4.1 haad printf(
2620 1.1.4.1 haad "wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
2621 1.1.4.1 haad wb->wb_blk, (intmax_t)off);
2622 1.1.4.1 haad dirtycnt++;
2623 1.1.4.1 haad mismatchcnt++;
2624 1.1.4.1 haad }
2625 1.1.4.1 haad } else {
2626 1.1.4.1 haad wapbl_circ_advance(wr,
2627 1.1.4.1 haad fsblklen, &off);
2628 1.1.4.1 haad }
2629 1.1.4.1 haad }
2630 1.1.4.1 haad #if 0
2631 1.1.4.1 haad /*
2632 1.1.4.1 haad * If all of the blocks in an entry
2633 1.1.4.1 haad * are clean, then remove all of its
2634 1.1.4.1 haad * blocks from the hashtable since they
2635 1.1.4.1 haad * never will need replay.
2636 1.1.4.1 haad */
2637 1.1.4.1 haad if ((foundcnt != 0) &&
2638 1.1.4.1 haad (dirtycnt == 0)) {
2639 1.1.4.1 haad off = saveoff;
2640 1.1.4.1 haad wapbl_circ_advance(wr,
2641 1.1.4.1 haad logblklen, &off);
2642 1.1.4.1 haad for (j = 0; j < n; j++) {
2643 1.1.4.1 haad struct wapbl_blk *wb =
2644 1.1.4.1 haad wapbl_blkhash_get(wr,
2645 1.1.4.1 haad wc->wc_blocks[i].wc_daddr + j);
2646 1.1.4.1 haad if (wb &&
2647 1.1.4.1 haad (wb->wb_off == off)) {
2648 1.1.4.1 haad wapbl_blkhash_rem(wr, wb->wb_blk);
2649 1.1.4.1 haad }
2650 1.1.4.1 haad wapbl_circ_advance(wr,
2651 1.1.4.1 haad fsblklen, &off);
2652 1.1.4.1 haad }
2653 1.1.4.1 haad }
2654 1.1.4.1 haad #endif
2655 1.1.4.1 haad }
2656 1.1.4.1 haad }
2657 1.1.4.1 haad break;
2658 1.1.4.1 haad case WAPBL_WC_REVOCATIONS:
2659 1.1.4.1 haad case WAPBL_WC_INODES:
2660 1.1.4.1 haad break;
2661 1.1.4.1 haad default:
2662 1.1.4.1 haad KASSERT(0);
2663 1.1.4.1 haad }
2664 1.1.4.1 haad #ifdef DEBUG
2665 1.1.4.1 haad wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2666 1.1.4.1 haad KASSERT(off == saveoff);
2667 1.1.4.1 haad #endif
2668 1.1.4.1 haad }
2669 1.1.4.1 haad out:
2670 1.1.4.1 haad wapbl_free(scratch1);
2671 1.1.4.1 haad wapbl_free(scratch2);
2672 1.1.4.1 haad if (!error && mismatchcnt)
2673 1.1.4.1 haad error = EFTYPE;
2674 1.1.4.1 haad return error;
2675 1.1.4.1 haad }
2676 1.1.4.1 haad #endif
2677 1.1.4.1 haad
2678 1.1.4.1 haad int
2679 1.1.4.1 haad wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2680 1.1.4.1 haad {
2681 1.1.4.1 haad off_t off;
2682 1.1.4.1 haad struct wapbl_wc_header *wch = &wr->wr_wc_header;
2683 1.1.4.1 haad int logblklen = 1<<wch->wc_log_dev_bshift;
2684 1.1.4.1 haad int fsblklen = 1<<wch->wc_fs_dev_bshift;
2685 1.1.4.1 haad void *scratch1 = wapbl_malloc(MAXBSIZE);
2686 1.1.4.1 haad int error = 0;
2687 1.1.4.1 haad
2688 1.1.4.1 haad KDASSERT(wapbl_replay_isopen(wr));
2689 1.1.4.1 haad
2690 1.1.4.1 haad /*
2691 1.1.4.1 haad * This parses the journal for replay, although it could
2692 1.1.4.1 haad * just as easily walk the hashtable instead.
2693 1.1.4.1 haad */
2694 1.1.4.1 haad
2695 1.1.4.1 haad off = wch->wc_tail;
2696 1.1.4.1 haad while (off != wch->wc_head) {
2697 1.1.4.1 haad struct wapbl_wc_null *wcn;
2698 1.1.4.1 haad #ifdef DEBUG
2699 1.1.4.1 haad off_t saveoff = off;
2700 1.1.4.1 haad #endif
2701 1.1.4.1 haad error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
2702 1.1.4.1 haad if (error)
2703 1.1.4.1 haad goto out;
2704 1.1.4.1 haad wcn = (struct wapbl_wc_null *)wr->wr_scratch;
2705 1.1.4.1 haad switch (wcn->wc_type) {
2706 1.1.4.1 haad case WAPBL_WC_BLOCKS:
2707 1.1.4.1 haad {
2708 1.1.4.1 haad struct wapbl_wc_blocklist *wc =
2709 1.1.4.1 haad (struct wapbl_wc_blocklist *)wr->wr_scratch;
2710 1.1.4.1 haad int i;
2711 1.1.4.1 haad for (i = 0; i < wc->wc_blkcount; i++) {
2712 1.1.4.1 haad int j, n;
2713 1.1.4.1 haad /*
2714 1.1.4.1 haad * Check each physical block against
2715 1.1.4.1 haad * the hashtable independently
2716 1.1.4.1 haad */
2717 1.1.4.1 haad n = wc->wc_blocks[i].wc_dlen >>
2718 1.1.4.1 haad wch->wc_fs_dev_bshift;
2719 1.1.4.1 haad for (j = 0; j < n; j++) {
2720 1.1.4.1 haad struct wapbl_blk *wb =
2721 1.1.4.1 haad wapbl_blkhash_get(wr,
2722 1.1.4.1 haad wc->wc_blocks[i].wc_daddr + j);
2723 1.1.4.1 haad if (wb && (wb->wb_off == off)) {
2724 1.1.4.1 haad error = wapbl_circ_read(
2725 1.1.4.1 haad wr, scratch1,
2726 1.1.4.1 haad fsblklen, &off);
2727 1.1.4.1 haad if (error)
2728 1.1.4.1 haad goto out;
2729 1.1.4.1 haad error =
2730 1.1.4.1 haad wapbl_write(scratch1,
2731 1.1.4.1 haad fsblklen, fsdevvp,
2732 1.1.4.1 haad wb->wb_blk);
2733 1.1.4.1 haad if (error)
2734 1.1.4.1 haad goto out;
2735 1.1.4.1 haad } else {
2736 1.1.4.1 haad wapbl_circ_advance(wr,
2737 1.1.4.1 haad fsblklen, &off);
2738 1.1.4.1 haad }
2739 1.1.4.1 haad }
2740 1.1.4.1 haad }
2741 1.1.4.1 haad }
2742 1.1.4.1 haad break;
2743 1.1.4.1 haad case WAPBL_WC_REVOCATIONS:
2744 1.1.4.1 haad case WAPBL_WC_INODES:
2745 1.1.4.1 haad break;
2746 1.1.4.1 haad default:
2747 1.1.4.1 haad KASSERT(0);
2748 1.1.4.1 haad }
2749 1.1.4.1 haad #ifdef DEBUG
2750 1.1.4.1 haad wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
2751 1.1.4.1 haad KASSERT(off == saveoff);
2752 1.1.4.1 haad #endif
2753 1.1.4.1 haad }
2754 1.1.4.1 haad out:
2755 1.1.4.1 haad wapbl_free(scratch1);
2756 1.1.4.1 haad return error;
2757 1.1.4.1 haad }
2758 1.1.4.1 haad
2759 1.1.4.1 haad int
2760 1.1.4.1 haad wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2761 1.1.4.1 haad {
2762 1.1.4.1 haad struct wapbl_wc_header *wch = &wr->wr_wc_header;
2763 1.1.4.1 haad int fsblklen = 1<<wch->wc_fs_dev_bshift;
2764 1.1.4.1 haad
2765 1.1.4.1 haad KDASSERT(wapbl_replay_isopen(wr));
2766 1.1.4.1 haad
2767 1.1.4.1 haad KASSERT((len % fsblklen) == 0);
2768 1.1.4.1 haad
2769 1.1.4.1 haad while (len != 0) {
2770 1.1.4.1 haad struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2771 1.1.4.1 haad if (wb) {
2772 1.1.4.1 haad off_t off = wb->wb_off;
2773 1.1.4.1 haad int error;
2774 1.1.4.1 haad error = wapbl_circ_read(wr, data, fsblklen, &off);
2775 1.1.4.1 haad if (error)
2776 1.1.4.1 haad return error;
2777 1.1.4.1 haad }
2778 1.1.4.1 haad data = (uint8_t *)data + fsblklen;
2779 1.1.4.1 haad len -= fsblklen;
2780 1.1.4.1 haad blk++;
2781 1.1.4.1 haad }
2782 1.1.4.1 haad return 0;
2783 1.1.4.1 haad }
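
/*
 * Illustrative sketch, not compiled: wapbl_replay_read() above overlays
 * journalled copies onto a buffer the caller has already filled from
 * the device -- each filesystem block in the range that appears in the
 * replay hash is replaced with the newer copy from the log.  A minimal
 * model where lookup() is a stand-in for wapbl_blkhash_get() plus
 * wapbl_circ_read().
 */
#if 0
#include <stddef.h>
#include <string.h>

/* Return the journalled copy of block blkno, or NULL if there is none. */
static const void *
lookup(long long blkno)
{
	(void)blkno;
	return NULL;		/* stand-in only */
}

static void
overlay(void *data, long long blkno, size_t len, size_t fsblklen)
{
	unsigned char *p = data;

	while (len != 0) {
		const void *src = lookup(blkno);

		if (src != NULL)
			memcpy(p, src, fsblklen);	/* newer copy wins */
		p += fsblklen;
		len -= fsblklen;
		blkno++;
	}
}
#endif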
2784