vfs_wapbl.c revision 1.3.4.3 1 1.3.4.2 mjf /* $NetBSD: vfs_wapbl.c,v 1.3.4.3 2009/01/17 13:29:21 mjf Exp $ */
2 1.3.4.2 mjf
3 1.3.4.2 mjf /*-
4 1.3.4.2 mjf * Copyright (c) 2003,2008 The NetBSD Foundation, Inc.
5 1.3.4.2 mjf * All rights reserved.
6 1.3.4.2 mjf *
7 1.3.4.2 mjf * This code is derived from software contributed to The NetBSD Foundation
8 1.3.4.2 mjf * by Wasabi Systems, Inc.
9 1.3.4.2 mjf *
10 1.3.4.2 mjf * Redistribution and use in source and binary forms, with or without
11 1.3.4.2 mjf * modification, are permitted provided that the following conditions
12 1.3.4.2 mjf * are met:
13 1.3.4.2 mjf * 1. Redistributions of source code must retain the above copyright
14 1.3.4.2 mjf * notice, this list of conditions and the following disclaimer.
15 1.3.4.2 mjf * 2. Redistributions in binary form must reproduce the above copyright
16 1.3.4.2 mjf * notice, this list of conditions and the following disclaimer in the
17 1.3.4.2 mjf * documentation and/or other materials provided with the distribution.
18 1.3.4.2 mjf *
19 1.3.4.2 mjf * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.3.4.2 mjf * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.3.4.2 mjf * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.3.4.2 mjf * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.3.4.2 mjf * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.3.4.2 mjf * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.3.4.2 mjf * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.3.4.2 mjf * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.3.4.2 mjf * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.3.4.2 mjf * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.3.4.2 mjf * POSSIBILITY OF SUCH DAMAGE.
30 1.3.4.2 mjf */
31 1.3.4.2 mjf
32 1.3.4.2 mjf /*
33 1.3.4.2 mjf * This implements file system independent write ahead filesystem logging.
34 1.3.4.2 mjf */
35 1.3.4.3 mjf
36 1.3.4.3 mjf #define WAPBL_INTERNAL
37 1.3.4.3 mjf
38 1.3.4.2 mjf #include <sys/cdefs.h>
39 1.3.4.2 mjf __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.3.4.3 2009/01/17 13:29:21 mjf Exp $");
40 1.3.4.2 mjf
41 1.3.4.2 mjf #include <sys/param.h>
42 1.3.4.2 mjf
43 1.3.4.2 mjf #ifdef _KERNEL
44 1.3.4.2 mjf #include <sys/param.h>
45 1.3.4.2 mjf #include <sys/namei.h>
46 1.3.4.2 mjf #include <sys/proc.h>
47 1.3.4.2 mjf #include <sys/uio.h>
48 1.3.4.2 mjf #include <sys/vnode.h>
49 1.3.4.2 mjf #include <sys/file.h>
50 1.3.4.2 mjf #include <sys/malloc.h>
51 1.3.4.2 mjf #include <sys/resourcevar.h>
52 1.3.4.2 mjf #include <sys/conf.h>
53 1.3.4.2 mjf #include <sys/mount.h>
54 1.3.4.2 mjf #include <sys/kernel.h>
55 1.3.4.2 mjf #include <sys/kauth.h>
56 1.3.4.2 mjf #include <sys/mutex.h>
57 1.3.4.2 mjf #include <sys/atomic.h>
58 1.3.4.2 mjf #include <sys/wapbl.h>
59 1.3.4.3 mjf #include <sys/wapbl_replay.h>
60 1.3.4.2 mjf
61 1.3.4.2 mjf #if WAPBL_UVM_ALLOC
62 1.3.4.2 mjf #include <uvm/uvm.h>
63 1.3.4.2 mjf #endif
64 1.3.4.2 mjf
65 1.3.4.2 mjf #include <miscfs/specfs/specdev.h>
66 1.3.4.2 mjf
67 1.3.4.2 mjf MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
68 1.3.4.2 mjf #define wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
69 1.3.4.2 mjf #define wapbl_free(a) free((a), M_WAPBL)
70 1.3.4.2 mjf #define wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO)
71 1.3.4.3 mjf #define wapbl_realloc(ptr, s) realloc((ptr), (s), M_WAPBL, M_WAITOK | M_ZERO)
72 1.3.4.2 mjf
73 1.3.4.2 mjf #else /* !_KERNEL */
74 1.3.4.2 mjf #include <assert.h>
75 1.3.4.2 mjf #include <errno.h>
76 1.3.4.2 mjf #include <stdio.h>
77 1.3.4.2 mjf #include <stdbool.h>
78 1.3.4.2 mjf #include <stdlib.h>
79 1.3.4.2 mjf #include <string.h>
80 1.3.4.2 mjf
81 1.3.4.2 mjf #include <sys/time.h>
82 1.3.4.2 mjf #include <sys/wapbl.h>
83 1.3.4.3 mjf #include <sys/wapbl_replay.h>
84 1.3.4.2 mjf
85 1.3.4.2 mjf #define KDASSERT(x) assert(x)
86 1.3.4.2 mjf #define KASSERT(x) assert(x)
87 1.3.4.2 mjf #define wapbl_malloc(s) malloc(s)
88 1.3.4.2 mjf #define wapbl_free(a) free(a)
89 1.3.4.2 mjf #define wapbl_calloc(n, s) calloc((n), (s))
90 1.3.4.3 mjf #define wapbl_realloc(ptr, s) realloc((ptr), (s))
91 1.3.4.2 mjf
92 1.3.4.2 mjf #endif /* !_KERNEL */
93 1.3.4.2 mjf
94 1.3.4.2 mjf /*
95 1.3.4.2 mjf * INTERNAL DATA STRUCTURES
96 1.3.4.2 mjf */
97 1.3.4.2 mjf
98 1.3.4.2 mjf /*
99 1.3.4.2 mjf * This structure holds per-mount log information.
100 1.3.4.2 mjf *
101 1.3.4.2 mjf * Legend: a = atomic access only
102 1.3.4.2 mjf * r = read-only after init
103 1.3.4.2 mjf * l = rwlock held
104 1.3.4.2 mjf * m = mutex held
105 1.3.4.2 mjf * u = unlocked access ok
106 1.3.4.2 mjf * b = bufcache_lock held
107 1.3.4.2 mjf */
108 1.3.4.2 mjf struct wapbl {
109 1.3.4.2 mjf struct vnode *wl_logvp; /* r: log here */
110 1.3.4.2 mjf struct vnode *wl_devvp; /* r: log on this device */
111 1.3.4.2 mjf struct mount *wl_mount; /* r: mountpoint wl is associated with */
112 1.3.4.2 mjf daddr_t wl_logpbn; /* r: Physical block number of start of log */
113 1.3.4.2 mjf int wl_log_dev_bshift; /* r: logarithm of device block size of log
114 1.3.4.2 mjf device */
115 1.3.4.2 mjf int wl_fs_dev_bshift; /* r: logarithm of device block size of
116 1.3.4.2 mjf filesystem device */
117 1.3.4.2 mjf
118 1.3.4.2 mjf unsigned wl_lock_count; /* m: Count of transactions in progress */
119 1.3.4.2 mjf
120 1.3.4.2 mjf size_t wl_circ_size; /* r: Number of bytes in buffer of log */
121 1.3.4.2 mjf size_t wl_circ_off; /* r: Number of bytes reserved at start */
122 1.3.4.2 mjf
123 1.3.4.2 mjf size_t wl_bufcount_max; /* r: Number of buffers reserved for log */
124 1.3.4.2 mjf size_t wl_bufbytes_max; /* r: Number of buf bytes reserved for log */
125 1.3.4.2 mjf
126 1.3.4.2 mjf off_t wl_head; /* l: Byte offset of log head */
127 1.3.4.2 mjf off_t wl_tail; /* l: Byte offset of log tail */
128 1.3.4.2 mjf /*
129 1.3.4.2 mjf * head == tail == 0 means log is empty
130 1.3.4.2 mjf * head == tail != 0 means log is full
131 1.3.4.2 mjf * see assertions in wapbl_advance() for other boundary conditions.
132 1.3.4.2 mjf * only truncate moves the tail, except when flush sets it to
133 1.3.4.2 mjf * wl_header_size only flush moves the head, except when truncate
134 1.3.4.2 mjf * sets it to 0.
135 1.3.4.2 mjf */
136 1.3.4.2 mjf
137 1.3.4.2 mjf struct wapbl_wc_header *wl_wc_header; /* l */
138 1.3.4.2 mjf void *wl_wc_scratch; /* l: scratch space (XXX: por que?!?) */
139 1.3.4.2 mjf
140 1.3.4.2 mjf kmutex_t wl_mtx; /* u: short-term lock */
141 1.3.4.2 mjf krwlock_t wl_rwlock; /* u: File system transaction lock */
142 1.3.4.2 mjf
143 1.3.4.2 mjf /*
144 1.3.4.2 mjf * Must be held while accessing
145 1.3.4.2 mjf * wl_count or wl_bufs or head or tail
146 1.3.4.2 mjf */
147 1.3.4.2 mjf
148 1.3.4.2 mjf /*
149 1.3.4.2 mjf * Callback called from within the flush routine to flush any extra
150 1.3.4.2 mjf * bits. Note that flush may be skipped without calling this if
151 1.3.4.2 mjf * there are no outstanding buffers in the transaction.
152 1.3.4.2 mjf */
153 1.3.4.3 mjf #if _KERNEL
154 1.3.4.2 mjf wapbl_flush_fn_t wl_flush; /* r */
155 1.3.4.2 mjf wapbl_flush_fn_t wl_flush_abort;/* r */
156 1.3.4.3 mjf #endif
157 1.3.4.2 mjf
158 1.3.4.2 mjf size_t wl_bufbytes; /* m: Byte count of pages in wl_bufs */
159 1.3.4.2 mjf size_t wl_bufcount; /* m: Count of buffers in wl_bufs */
160 1.3.4.2 mjf size_t wl_bcount; /* m: Total bcount of wl_bufs */
161 1.3.4.2 mjf
162 1.3.4.2 mjf LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */
163 1.3.4.2 mjf
164 1.3.4.2 mjf kcondvar_t wl_reclaimable_cv; /* m (obviously) */
165 1.3.4.2 mjf size_t wl_reclaimable_bytes; /* m: Amount of space available for
166 1.3.4.2 mjf reclamation by truncate */
167 1.3.4.2 mjf int wl_error_count; /* m: # of wl_entries with errors */
168 1.3.4.2 mjf size_t wl_reserved_bytes; /* never truncate log smaller than this */
169 1.3.4.2 mjf
170 1.3.4.2 mjf #ifdef WAPBL_DEBUG_BUFBYTES
171 1.3.4.2 mjf size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
172 1.3.4.2 mjf #endif
173 1.3.4.2 mjf
174 1.3.4.2 mjf daddr_t *wl_deallocblks;/* l: address of block */
175 1.3.4.2 mjf int *wl_dealloclens; /* l: size of block (fragments, kom ihg) */
176 1.3.4.2 mjf int wl_dealloccnt; /* l: total count */
177 1.3.4.2 mjf int wl_dealloclim; /* l: max count */
178 1.3.4.2 mjf
179 1.3.4.2 mjf /* hashtable of inode numbers for allocated but unlinked inodes */
180 1.3.4.2 mjf /* synch ??? */
181 1.3.4.2 mjf LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash;
182 1.3.4.2 mjf u_long wl_inohashmask;
183 1.3.4.2 mjf int wl_inohashcnt;
184 1.3.4.2 mjf
185 1.3.4.2 mjf SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction
186 1.3.4.2 mjf accounting */
187 1.3.4.2 mjf };
188 1.3.4.2 mjf
189 1.3.4.2 mjf #ifdef WAPBL_DEBUG_PRINT
190 1.3.4.2 mjf int wapbl_debug_print = WAPBL_DEBUG_PRINT;
191 1.3.4.2 mjf #endif
192 1.3.4.2 mjf
193 1.3.4.2 mjf /****************************************************************/
194 1.3.4.2 mjf #ifdef _KERNEL
195 1.3.4.2 mjf
196 1.3.4.2 mjf #ifdef WAPBL_DEBUG
197 1.3.4.2 mjf struct wapbl *wapbl_debug_wl;
198 1.3.4.2 mjf #endif
199 1.3.4.2 mjf
200 1.3.4.2 mjf static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail);
201 1.3.4.2 mjf static int wapbl_write_blocks(struct wapbl *wl, off_t *offp);
202 1.3.4.2 mjf static int wapbl_write_revocations(struct wapbl *wl, off_t *offp);
203 1.3.4.2 mjf static int wapbl_write_inodes(struct wapbl *wl, off_t *offp);
204 1.3.4.2 mjf #endif /* _KERNEL */
205 1.3.4.2 mjf
206 1.3.4.3 mjf static int wapbl_replay_process(struct wapbl_replay *wr, off_t, off_t);
207 1.3.4.2 mjf
208 1.3.4.2 mjf static __inline size_t wapbl_space_free(size_t avail, off_t head,
209 1.3.4.2 mjf off_t tail);
210 1.3.4.2 mjf static __inline size_t wapbl_space_used(size_t avail, off_t head,
211 1.3.4.2 mjf off_t tail);
212 1.3.4.2 mjf
213 1.3.4.2 mjf #ifdef _KERNEL
214 1.3.4.2 mjf
215 1.3.4.2 mjf #define WAPBL_INODETRK_SIZE 83
216 1.3.4.2 mjf static int wapbl_ino_pool_refcount;
217 1.3.4.2 mjf static struct pool wapbl_ino_pool;
/*
 * Hash-table entry recording an inode that is allocated but unlinked;
 * these are logged so replay can release them after a crash.
 */
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;	/* chain in wl_inohash bucket */
	ino_t wi_ino;			/* inode number */
	mode_t wi_mode;			/* inode mode */
};
223 1.3.4.2 mjf
224 1.3.4.2 mjf static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
225 1.3.4.2 mjf static void wapbl_inodetrk_free(struct wapbl *wl);
226 1.3.4.2 mjf static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);
227 1.3.4.2 mjf
228 1.3.4.2 mjf static size_t wapbl_transaction_len(struct wapbl *wl);
229 1.3.4.2 mjf static __inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);
230 1.3.4.2 mjf
231 1.3.4.3 mjf #if 0
232 1.3.4.3 mjf int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
233 1.3.4.3 mjf #endif
234 1.3.4.3 mjf
235 1.3.4.3 mjf static int wapbl_replay_isopen1(struct wapbl_replay *);
236 1.3.4.3 mjf
/*
 * This is useful for debugging.  If set, the log will
 * only be truncated when necessary.
 */
int wapbl_lazy_truncate = 0;

/*
 * Operations vector exported to filesystem-independent code; callers
 * go through these pointers rather than referencing wapbl routines
 * directly.
 */
struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_can_read = wapbl_replay_can_read,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};
258 1.3.4.2 mjf
259 1.3.4.2 mjf void
260 1.3.4.2 mjf wapbl_init()
261 1.3.4.2 mjf {
262 1.3.4.2 mjf
263 1.3.4.2 mjf malloc_type_attach(M_WAPBL);
264 1.3.4.2 mjf }
265 1.3.4.2 mjf
/*
 * Carry the unlinked-but-allocated inode list from a replayed log (wr)
 * into a freshly opened log (wl): bump the commit generation past the
 * old log's, re-register each inode, and write the inode records so
 * they survive even though the old log contents are being reused.
 *
 * Returns 0 on success or the error from wapbl_write_inodes().
 */
static int
wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
{
	int error, i;

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));

	/*
	 * Its only valid to reuse the replay log if its
	 * the same as the new log we just opened.
	 */
	KDASSERT(!wapbl_replay_isopen(wr));
	KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
	KASSERT(wl->wl_logpbn == wr->wr_logpbn);
	KASSERT(wl->wl_circ_size == wr->wr_circ_size);
	KASSERT(wl->wl_circ_off == wr->wr_circ_off);
	KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
	KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);

	/* New generation must supersede the one found on disk. */
	wl->wl_wc_header->wc_generation = wr->wr_generation + 1;

	for (i = 0; i < wr->wr_inodescnt; i++)
		wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
		    wr->wr_inodes[i].wr_imode);

	/* Make sure new transaction won't overwrite old inodes list */
	KDASSERT(wapbl_transaction_len(wl) <=
	    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
	    wr->wr_inodestail));

	/* Start the new log where the old inode records ended. */
	wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
	wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
	    wapbl_transaction_len(wl);

	/* Persist the inode list; this advances wl_head past wl_tail. */
	error = wapbl_write_inodes(wl, &wl->wl_head);
	if (error)
		return error;

	KASSERT(wl->wl_head != wl->wl_tail);
	KASSERT(wl->wl_head != 0);

	return 0;
}
310 1.3.4.3 mjf
311 1.3.4.2 mjf int
312 1.3.4.2 mjf wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
313 1.3.4.2 mjf daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
314 1.3.4.2 mjf wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
315 1.3.4.2 mjf {
316 1.3.4.2 mjf struct wapbl *wl;
317 1.3.4.2 mjf struct vnode *devvp;
318 1.3.4.2 mjf daddr_t logpbn;
319 1.3.4.2 mjf int error;
320 1.3.4.2 mjf int log_dev_bshift = DEV_BSHIFT;
321 1.3.4.2 mjf int fs_dev_bshift = DEV_BSHIFT;
322 1.3.4.2 mjf int run;
323 1.3.4.2 mjf
324 1.3.4.2 mjf WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64
325 1.3.4.2 mjf " count=%zu blksize=%zu\n", vp, off, count, blksize));
326 1.3.4.2 mjf
327 1.3.4.2 mjf if (log_dev_bshift > fs_dev_bshift) {
328 1.3.4.2 mjf WAPBL_PRINTF(WAPBL_PRINT_OPEN,
329 1.3.4.2 mjf ("wapbl: log device's block size cannot be larger "
330 1.3.4.2 mjf "than filesystem's\n"));
331 1.3.4.2 mjf /*
332 1.3.4.2 mjf * Not currently implemented, although it could be if
333 1.3.4.2 mjf * needed someday.
334 1.3.4.2 mjf */
335 1.3.4.2 mjf return ENOSYS;
336 1.3.4.2 mjf }
337 1.3.4.2 mjf
338 1.3.4.2 mjf if (off < 0)
339 1.3.4.2 mjf return EINVAL;
340 1.3.4.2 mjf
341 1.3.4.2 mjf if (blksize < DEV_BSIZE)
342 1.3.4.2 mjf return EINVAL;
343 1.3.4.2 mjf if (blksize % DEV_BSIZE)
344 1.3.4.2 mjf return EINVAL;
345 1.3.4.2 mjf
346 1.3.4.2 mjf /* XXXTODO: verify that the full load is writable */
347 1.3.4.2 mjf
348 1.3.4.2 mjf /*
349 1.3.4.2 mjf * XXX check for minimum log size
350 1.3.4.2 mjf * minimum is governed by minimum amount of space
351 1.3.4.2 mjf * to complete a transaction. (probably truncate)
352 1.3.4.2 mjf */
353 1.3.4.2 mjf /* XXX for now pick something minimal */
354 1.3.4.2 mjf if ((count * blksize) < MAXPHYS) {
355 1.3.4.2 mjf return ENOSPC;
356 1.3.4.2 mjf }
357 1.3.4.2 mjf
358 1.3.4.2 mjf if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
359 1.3.4.2 mjf return error;
360 1.3.4.2 mjf }
361 1.3.4.2 mjf
362 1.3.4.2 mjf wl = wapbl_calloc(1, sizeof(*wl));
363 1.3.4.2 mjf rw_init(&wl->wl_rwlock);
364 1.3.4.2 mjf mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
365 1.3.4.2 mjf cv_init(&wl->wl_reclaimable_cv, "wapblrec");
366 1.3.4.2 mjf LIST_INIT(&wl->wl_bufs);
367 1.3.4.2 mjf SIMPLEQ_INIT(&wl->wl_entries);
368 1.3.4.2 mjf
369 1.3.4.2 mjf wl->wl_logvp = vp;
370 1.3.4.2 mjf wl->wl_devvp = devvp;
371 1.3.4.2 mjf wl->wl_mount = mp;
372 1.3.4.2 mjf wl->wl_logpbn = logpbn;
373 1.3.4.2 mjf wl->wl_log_dev_bshift = log_dev_bshift;
374 1.3.4.2 mjf wl->wl_fs_dev_bshift = fs_dev_bshift;
375 1.3.4.2 mjf
376 1.3.4.2 mjf wl->wl_flush = flushfn;
377 1.3.4.2 mjf wl->wl_flush_abort = flushabortfn;
378 1.3.4.2 mjf
379 1.3.4.2 mjf /* Reserve two log device blocks for the commit headers */
380 1.3.4.2 mjf wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
381 1.3.4.2 mjf wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
382 1.3.4.2 mjf /* truncate the log usage to a multiple of log_dev_bshift */
383 1.3.4.2 mjf wl->wl_circ_size >>= wl->wl_log_dev_bshift;
384 1.3.4.2 mjf wl->wl_circ_size <<= wl->wl_log_dev_bshift;
385 1.3.4.2 mjf
386 1.3.4.2 mjf /*
387 1.3.4.2 mjf * wl_bufbytes_max limits the size of the in memory transaction space.
388 1.3.4.2 mjf * - Since buffers are allocated and accounted for in units of
389 1.3.4.2 mjf * PAGE_SIZE it is required to be a multiple of PAGE_SIZE
390 1.3.4.2 mjf * (i.e. 1<<PAGE_SHIFT)
391 1.3.4.2 mjf * - Since the log device has to be written in units of
392 1.3.4.2 mjf * 1<<wl_log_dev_bshift it is required to be a mulitple of
393 1.3.4.2 mjf * 1<<wl_log_dev_bshift.
394 1.3.4.2 mjf * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
395 1.3.4.2 mjf * it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
396 1.3.4.2 mjf * Therefore it must be multiple of the least common multiple of those
397 1.3.4.2 mjf * three quantities. Fortunately, all of those quantities are
398 1.3.4.2 mjf * guaranteed to be a power of two, and the least common multiple of
399 1.3.4.2 mjf * a set of numbers which are all powers of two is simply the maximum
400 1.3.4.2 mjf * of those numbers. Finally, the maximum logarithm of a power of two
401 1.3.4.2 mjf * is the same as the log of the maximum power of two. So we can do
402 1.3.4.2 mjf * the following operations to size wl_bufbytes_max:
403 1.3.4.2 mjf */
404 1.3.4.2 mjf
405 1.3.4.2 mjf /* XXX fix actual number of pages reserved per filesystem. */
406 1.3.4.2 mjf wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);
407 1.3.4.2 mjf
408 1.3.4.2 mjf /* Round wl_bufbytes_max to the largest power of two constraint */
409 1.3.4.2 mjf wl->wl_bufbytes_max >>= PAGE_SHIFT;
410 1.3.4.2 mjf wl->wl_bufbytes_max <<= PAGE_SHIFT;
411 1.3.4.2 mjf wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
412 1.3.4.2 mjf wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
413 1.3.4.2 mjf wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
414 1.3.4.2 mjf wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
415 1.3.4.2 mjf
416 1.3.4.2 mjf /* XXX maybe use filesystem fragment size instead of 1024 */
417 1.3.4.2 mjf /* XXX fix actual number of buffers reserved per filesystem. */
418 1.3.4.2 mjf wl->wl_bufcount_max = (nbuf / 2) * 1024;
419 1.3.4.2 mjf
420 1.3.4.2 mjf /* XXX tie this into resource estimation */
421 1.3.4.2 mjf wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max);
422 1.3.4.2 mjf
423 1.3.4.2 mjf #if WAPBL_UVM_ALLOC
424 1.3.4.2 mjf wl->wl_deallocblks = (void *) uvm_km_zalloc(kernel_map,
425 1.3.4.2 mjf round_page(sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim));
426 1.3.4.2 mjf KASSERT(wl->wl_deallocblks != NULL);
427 1.3.4.2 mjf wl->wl_dealloclens = (void *) uvm_km_zalloc(kernel_map,
428 1.3.4.2 mjf round_page(sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim));
429 1.3.4.2 mjf KASSERT(wl->wl_dealloclens != NULL);
430 1.3.4.2 mjf #else
431 1.3.4.2 mjf wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) *
432 1.3.4.2 mjf wl->wl_dealloclim);
433 1.3.4.2 mjf wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) *
434 1.3.4.2 mjf wl->wl_dealloclim);
435 1.3.4.2 mjf #endif
436 1.3.4.2 mjf
437 1.3.4.2 mjf wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);
438 1.3.4.2 mjf
439 1.3.4.2 mjf /* Initialize the commit header */
440 1.3.4.2 mjf {
441 1.3.4.2 mjf struct wapbl_wc_header *wc;
442 1.3.4.3 mjf size_t len = 1 << wl->wl_log_dev_bshift;
443 1.3.4.2 mjf wc = wapbl_calloc(1, len);
444 1.3.4.2 mjf wc->wc_type = WAPBL_WC_HEADER;
445 1.3.4.2 mjf wc->wc_len = len;
446 1.3.4.2 mjf wc->wc_circ_off = wl->wl_circ_off;
447 1.3.4.2 mjf wc->wc_circ_size = wl->wl_circ_size;
448 1.3.4.2 mjf /* XXX wc->wc_fsid */
449 1.3.4.2 mjf wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
450 1.3.4.2 mjf wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
451 1.3.4.2 mjf wl->wl_wc_header = wc;
452 1.3.4.2 mjf wl->wl_wc_scratch = wapbl_malloc(len);
453 1.3.4.2 mjf }
454 1.3.4.2 mjf
455 1.3.4.2 mjf /*
456 1.3.4.2 mjf * if there was an existing set of unlinked but
457 1.3.4.2 mjf * allocated inodes, preserve it in the new
458 1.3.4.2 mjf * log.
459 1.3.4.2 mjf */
460 1.3.4.2 mjf if (wr && wr->wr_inodescnt) {
461 1.3.4.3 mjf error = wapbl_start_flush_inodes(wl, wr);
462 1.3.4.2 mjf if (error)
463 1.3.4.2 mjf goto errout;
464 1.3.4.2 mjf }
465 1.3.4.2 mjf
466 1.3.4.2 mjf error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
467 1.3.4.2 mjf if (error) {
468 1.3.4.2 mjf goto errout;
469 1.3.4.2 mjf }
470 1.3.4.2 mjf
471 1.3.4.2 mjf *wlp = wl;
472 1.3.4.2 mjf #if defined(WAPBL_DEBUG)
473 1.3.4.2 mjf wapbl_debug_wl = wl;
474 1.3.4.2 mjf #endif
475 1.3.4.2 mjf
476 1.3.4.2 mjf return 0;
477 1.3.4.2 mjf errout:
478 1.3.4.2 mjf wapbl_discard(wl);
479 1.3.4.2 mjf wapbl_free(wl->wl_wc_scratch);
480 1.3.4.2 mjf wapbl_free(wl->wl_wc_header);
481 1.3.4.2 mjf #if WAPBL_UVM_ALLOC
482 1.3.4.2 mjf uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
483 1.3.4.2 mjf round_page(sizeof(*wl->wl_deallocblks *
484 1.3.4.2 mjf wl->wl_dealloclim)));
485 1.3.4.2 mjf uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
486 1.3.4.2 mjf round_page(sizeof(*wl->wl_dealloclens *
487 1.3.4.2 mjf wl->wl_dealloclim)));
488 1.3.4.2 mjf #else
489 1.3.4.2 mjf wapbl_free(wl->wl_deallocblks);
490 1.3.4.2 mjf wapbl_free(wl->wl_dealloclens);
491 1.3.4.2 mjf #endif
492 1.3.4.2 mjf wapbl_inodetrk_free(wl);
493 1.3.4.2 mjf wapbl_free(wl);
494 1.3.4.2 mjf
495 1.3.4.2 mjf return error;
496 1.3.4.2 mjf }
497 1.3.4.2 mjf
498 1.3.4.2 mjf /*
499 1.3.4.2 mjf * Like wapbl_flush, only discards the transaction
500 1.3.4.2 mjf * completely
501 1.3.4.2 mjf */
502 1.3.4.2 mjf
/*
 * Throw away the current transaction: invoke the filesystem's flush
 * callback on pending deallocations, drain the unlinked-inode hash,
 * release every buffer in the transaction, and detach all on-disk
 * transaction entries from this log.  Exits with the log empty.
 */
void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

#ifdef WAPBL_DEBUG_PRINT
	{
		struct wapbl_entry *we;
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			     "error = %d, unsynced = %zu\n",
			     we->we_bufcount, we->we_reclaimable_bytes,
			     we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid, wl->wl_bufcount, wl->wl_bufbytes,
		    wl->wl_bcount, wl->wl_dealloccnt,
		    wl->wl_inohashcnt, wl->wl_error_count,
		    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			     "error = %d\n",
			     we->we_bufcount, we->we_reclaimable_bytes,
			     we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	/* Empty every bucket of the unlinked-inode hash table. */
	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			/*
			 * The buffer will be unlocked and
			 * removed from the transaction in brelse
			 */
			mutex_exit(&wl->wl_mtx);
			brelsel(bp, 0);
			mutex_enter(&wl->wl_mtx);
		}
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}
	}

	/* Discard list of deallocs */
	wl->wl_dealloccnt = 0;
	/* XXX should we clear wl_reserved_bytes? */

	/* The transaction must now be completely empty. */
	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(LIST_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);

	rw_exit(&wl->wl_rwlock);
}
631 1.3.4.2 mjf
632 1.3.4.2 mjf int
633 1.3.4.2 mjf wapbl_stop(struct wapbl *wl, int force)
634 1.3.4.2 mjf {
635 1.3.4.2 mjf struct vnode *vp;
636 1.3.4.2 mjf int error;
637 1.3.4.2 mjf
638 1.3.4.2 mjf WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
639 1.3.4.2 mjf error = wapbl_flush(wl, 1);
640 1.3.4.2 mjf if (error) {
641 1.3.4.2 mjf if (force)
642 1.3.4.2 mjf wapbl_discard(wl);
643 1.3.4.2 mjf else
644 1.3.4.2 mjf return error;
645 1.3.4.2 mjf }
646 1.3.4.2 mjf
647 1.3.4.2 mjf /* Unlinked inodes persist after a flush */
648 1.3.4.2 mjf if (wl->wl_inohashcnt) {
649 1.3.4.2 mjf if (force) {
650 1.3.4.2 mjf wapbl_discard(wl);
651 1.3.4.2 mjf } else {
652 1.3.4.2 mjf return EBUSY;
653 1.3.4.2 mjf }
654 1.3.4.2 mjf }
655 1.3.4.2 mjf
656 1.3.4.2 mjf KASSERT(wl->wl_bufbytes == 0);
657 1.3.4.2 mjf KASSERT(wl->wl_bcount == 0);
658 1.3.4.2 mjf KASSERT(wl->wl_bufcount == 0);
659 1.3.4.2 mjf KASSERT(LIST_EMPTY(&wl->wl_bufs));
660 1.3.4.2 mjf KASSERT(wl->wl_dealloccnt == 0);
661 1.3.4.2 mjf KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
662 1.3.4.2 mjf KASSERT(wl->wl_inohashcnt == 0);
663 1.3.4.2 mjf
664 1.3.4.2 mjf vp = wl->wl_logvp;
665 1.3.4.2 mjf
666 1.3.4.2 mjf wapbl_free(wl->wl_wc_scratch);
667 1.3.4.2 mjf wapbl_free(wl->wl_wc_header);
668 1.3.4.2 mjf #if WAPBL_UVM_ALLOC
669 1.3.4.2 mjf uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_deallocblks,
670 1.3.4.2 mjf round_page(sizeof(*wl->wl_deallocblks *
671 1.3.4.2 mjf wl->wl_dealloclim)));
672 1.3.4.2 mjf uvm_km_free_wakeup(kernel_map, (vaddr_t) wl->wl_dealloclens,
673 1.3.4.2 mjf round_page(sizeof(*wl->wl_dealloclens *
674 1.3.4.2 mjf wl->wl_dealloclim)));
675 1.3.4.2 mjf #else
676 1.3.4.2 mjf wapbl_free(wl->wl_deallocblks);
677 1.3.4.2 mjf wapbl_free(wl->wl_dealloclens);
678 1.3.4.2 mjf #endif
679 1.3.4.2 mjf wapbl_inodetrk_free(wl);
680 1.3.4.2 mjf
681 1.3.4.2 mjf cv_destroy(&wl->wl_reclaimable_cv);
682 1.3.4.2 mjf mutex_destroy(&wl->wl_mtx);
683 1.3.4.2 mjf rw_destroy(&wl->wl_rwlock);
684 1.3.4.2 mjf wapbl_free(wl);
685 1.3.4.2 mjf
686 1.3.4.2 mjf return 0;
687 1.3.4.2 mjf }
688 1.3.4.2 mjf
/*
 * Perform synchronous raw I/O of len bytes at physical block pbn on
 * the (block) device devvp.  flags is exactly B_READ or B_WRITE.
 * Builds a private iobuf, submits it via VOP_STRATEGY, and waits for
 * completion.  Returns 0 or the error from biowait().
 */
static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;
	struct buf *bp;
	int error;

	KASSERT((flags & ~(B_WRITE | B_READ)) == 0);
	KASSERT(devvp->v_type == VBLK);

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		/* Account the pending output against the device vnode. */
		mutex_enter(&devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(&devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

	/* Set up a throwaway buffer pointing directly at caller's data. */
	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags = BC_BUSY; /* silly & dubious */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%x\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	/* Synchronous: block until the transfer completes. */
	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%x failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}
737 1.3.4.2 mjf
/*
 * Synchronously write len bytes from data to physical block pbn on
 * devvp.  Thin wrapper around wapbl_doio().
 */
int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}
744 1.3.4.2 mjf
/*
 * Synchronously read len bytes into data from physical block pbn on
 * devvp.  Thin wrapper around wapbl_doio().
 */
int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}
751 1.3.4.2 mjf
752 1.3.4.2 mjf /*
753 1.3.4.2 mjf * Off is byte offset returns new offset for next write
754 1.3.4.2 mjf * handles log wraparound
755 1.3.4.2 mjf */
756 1.3.4.2 mjf static int
757 1.3.4.2 mjf wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
758 1.3.4.2 mjf {
759 1.3.4.2 mjf size_t slen;
760 1.3.4.2 mjf off_t off = *offp;
761 1.3.4.2 mjf int error;
762 1.3.4.2 mjf
763 1.3.4.2 mjf KDASSERT(((len >> wl->wl_log_dev_bshift) <<
764 1.3.4.2 mjf wl->wl_log_dev_bshift) == len);
765 1.3.4.2 mjf
766 1.3.4.2 mjf if (off < wl->wl_circ_off)
767 1.3.4.2 mjf off = wl->wl_circ_off;
768 1.3.4.2 mjf slen = wl->wl_circ_off + wl->wl_circ_size - off;
769 1.3.4.2 mjf if (slen < len) {
770 1.3.4.2 mjf error = wapbl_write(data, slen, wl->wl_devvp,
771 1.3.4.2 mjf wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
772 1.3.4.2 mjf if (error)
773 1.3.4.2 mjf return error;
774 1.3.4.2 mjf data = (uint8_t *)data + slen;
775 1.3.4.2 mjf len -= slen;
776 1.3.4.2 mjf off = wl->wl_circ_off;
777 1.3.4.2 mjf }
778 1.3.4.2 mjf error = wapbl_write(data, len, wl->wl_devvp,
779 1.3.4.2 mjf wl->wl_logpbn + (off >> wl->wl_log_dev_bshift));
780 1.3.4.2 mjf if (error)
781 1.3.4.2 mjf return error;
782 1.3.4.2 mjf off += len;
783 1.3.4.2 mjf if (off >= wl->wl_circ_off + wl->wl_circ_size)
784 1.3.4.2 mjf off = wl->wl_circ_off;
785 1.3.4.2 mjf *offp = off;
786 1.3.4.2 mjf return 0;
787 1.3.4.2 mjf }
788 1.3.4.2 mjf
789 1.3.4.2 mjf /****************************************************************/
790 1.3.4.2 mjf
791 1.3.4.2 mjf int
792 1.3.4.2 mjf wapbl_begin(struct wapbl *wl, const char *file, int line)
793 1.3.4.2 mjf {
794 1.3.4.2 mjf int doflush;
795 1.3.4.2 mjf unsigned lockcount;
796 1.3.4.2 mjf krw_t op;
797 1.3.4.2 mjf
798 1.3.4.2 mjf KDASSERT(wl);
799 1.3.4.2 mjf
800 1.3.4.2 mjf /*
801 1.3.4.2 mjf * XXX: The original code calls for the use of a RW_READER lock
802 1.3.4.2 mjf * here, but it turns out there are performance issues with high
803 1.3.4.2 mjf * metadata-rate workloads (e.g. multiple simultaneous tar
804 1.3.4.2 mjf * extractions). For now, we force the lock to be RW_WRITER,
805 1.3.4.2 mjf * since that currently has the best performance characteristics
806 1.3.4.2 mjf * (even for a single tar-file extraction).
807 1.3.4.2 mjf *
808 1.3.4.2 mjf */
809 1.3.4.2 mjf #define WAPBL_DEBUG_SERIALIZE 1
810 1.3.4.2 mjf
811 1.3.4.2 mjf #ifdef WAPBL_DEBUG_SERIALIZE
812 1.3.4.2 mjf op = RW_WRITER;
813 1.3.4.2 mjf #else
814 1.3.4.2 mjf op = RW_READER;
815 1.3.4.2 mjf #endif
816 1.3.4.2 mjf
817 1.3.4.2 mjf /*
818 1.3.4.2 mjf * XXX this needs to be made much more sophisticated.
819 1.3.4.2 mjf * perhaps each wapbl_begin could reserve a specified
820 1.3.4.2 mjf * number of buffers and bytes.
821 1.3.4.2 mjf */
822 1.3.4.2 mjf mutex_enter(&wl->wl_mtx);
823 1.3.4.2 mjf lockcount = wl->wl_lock_count;
824 1.3.4.2 mjf doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
825 1.3.4.2 mjf wl->wl_bufbytes_max / 2) ||
826 1.3.4.2 mjf ((wl->wl_bufcount + (lockcount * 10)) >
827 1.3.4.2 mjf wl->wl_bufcount_max / 2) ||
828 1.3.4.2 mjf (wapbl_transaction_len(wl) > wl->wl_circ_size / 2);
829 1.3.4.2 mjf mutex_exit(&wl->wl_mtx);
830 1.3.4.2 mjf
831 1.3.4.2 mjf if (doflush) {
832 1.3.4.2 mjf WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
833 1.3.4.2 mjf ("force flush lockcnt=%d bufbytes=%zu "
834 1.3.4.2 mjf "(max=%zu) bufcount=%zu (max=%zu)\n",
835 1.3.4.2 mjf lockcount, wl->wl_bufbytes,
836 1.3.4.2 mjf wl->wl_bufbytes_max, wl->wl_bufcount,
837 1.3.4.2 mjf wl->wl_bufcount_max));
838 1.3.4.2 mjf }
839 1.3.4.2 mjf
840 1.3.4.2 mjf if (doflush) {
841 1.3.4.2 mjf int error = wapbl_flush(wl, 0);
842 1.3.4.2 mjf if (error)
843 1.3.4.2 mjf return error;
844 1.3.4.2 mjf }
845 1.3.4.2 mjf
846 1.3.4.2 mjf rw_enter(&wl->wl_rwlock, op);
847 1.3.4.2 mjf mutex_enter(&wl->wl_mtx);
848 1.3.4.2 mjf wl->wl_lock_count++;
849 1.3.4.2 mjf mutex_exit(&wl->wl_mtx);
850 1.3.4.2 mjf
851 1.3.4.2 mjf #if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
852 1.3.4.2 mjf WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
853 1.3.4.2 mjf ("wapbl_begin thread %d.%d with bufcount=%zu "
854 1.3.4.2 mjf "bufbytes=%zu bcount=%zu at %s:%d\n",
855 1.3.4.2 mjf curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
856 1.3.4.2 mjf wl->wl_bufbytes, wl->wl_bcount, file, line));
857 1.3.4.2 mjf #endif
858 1.3.4.2 mjf
859 1.3.4.2 mjf return 0;
860 1.3.4.2 mjf }
861 1.3.4.2 mjf
/*
 * End a journal transaction started with wapbl_begin(): drop this
 * caller's reference on the transaction lock count and release the
 * journal rwlock.
 */
void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT) && defined(WAPBL_DEBUG_SERIALIZE)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	/* The lock count must balance against wapbl_begin(). */
	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}
881 1.3.4.2 mjf
/*
 * Attach a busy metadata buffer to the current journal transaction.
 * If the buffer is already held by the journal (B_LOCKED set) it is
 * only re-queued at the head of the transaction list; otherwise the
 * journal's byte/buffer accounting is charged for it.
 */
void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then? leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		/* Already in the transaction: just move to the list head. */
		LIST_REMOVE(bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	} else {
		/* unlocked by dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize,
		    bp->b_bcount));
	}
	LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	/*
	 * NOTE(review): B_LOCKED is set after wl_mtx is dropped;
	 * presumably the buffer is still private to this thread via
	 * BC_BUSY (asserted above) — confirm against vfs_bio callers.
	 */
	bp->b_flags |= B_LOCKED;
}
926 1.3.4.2 mjf
927 1.3.4.2 mjf static void
928 1.3.4.2 mjf wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
929 1.3.4.2 mjf {
930 1.3.4.2 mjf
931 1.3.4.2 mjf KASSERT(mutex_owned(&wl->wl_mtx));
932 1.3.4.2 mjf KASSERT(bp->b_cflags & BC_BUSY);
933 1.3.4.2 mjf wapbl_jlock_assert(wl);
934 1.3.4.2 mjf
935 1.3.4.2 mjf #if 0
936 1.3.4.2 mjf /*
937 1.3.4.2 mjf * XXX this might be an issue for swapfiles.
938 1.3.4.2 mjf * see uvm_swap.c:1725
939 1.3.4.2 mjf *
940 1.3.4.2 mjf * XXXdeux: see above
941 1.3.4.2 mjf */
942 1.3.4.2 mjf KASSERT((bp->b_flags & BC_NOCACHE) == 0);
943 1.3.4.2 mjf #endif
944 1.3.4.2 mjf KASSERT(bp->b_flags & B_LOCKED);
945 1.3.4.2 mjf
946 1.3.4.2 mjf WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
947 1.3.4.2 mjf ("wapbl_remove_buf thread %d.%d removing buf %p with "
948 1.3.4.2 mjf "%d bytes %d bcount\n",
949 1.3.4.2 mjf curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount));
950 1.3.4.2 mjf
951 1.3.4.2 mjf KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
952 1.3.4.2 mjf wl->wl_bufbytes -= bp->b_bufsize;
953 1.3.4.2 mjf KASSERT(wl->wl_bcount >= bp->b_bcount);
954 1.3.4.2 mjf wl->wl_bcount -= bp->b_bcount;
955 1.3.4.2 mjf KASSERT(wl->wl_bufcount > 0);
956 1.3.4.2 mjf wl->wl_bufcount--;
957 1.3.4.2 mjf KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
958 1.3.4.2 mjf KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
959 1.3.4.2 mjf LIST_REMOVE(bp, b_wapbllist);
960 1.3.4.2 mjf
961 1.3.4.2 mjf bp->b_flags &= ~B_LOCKED;
962 1.3.4.2 mjf }
963 1.3.4.2 mjf
/*
 * Remove a buffer from the current transaction; called from brelsel()
 * in vfs_bio among other places.  Takes and releases the journal mutex
 * around wapbl_remove_buf_locked().
 */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}
973 1.3.4.2 mjf
974 1.3.4.2 mjf void
975 1.3.4.2 mjf wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
976 1.3.4.2 mjf {
977 1.3.4.2 mjf
978 1.3.4.2 mjf KASSERT(bp->b_cflags & BC_BUSY);
979 1.3.4.2 mjf
980 1.3.4.2 mjf /*
981 1.3.4.2 mjf * XXX: why does this depend on B_LOCKED? otherwise the buf
982 1.3.4.2 mjf * is not for a transaction? if so, why is this called in the
983 1.3.4.2 mjf * first place?
984 1.3.4.2 mjf */
985 1.3.4.2 mjf if (bp->b_flags & B_LOCKED) {
986 1.3.4.2 mjf mutex_enter(&wl->wl_mtx);
987 1.3.4.2 mjf wl->wl_bufbytes += bp->b_bufsize - oldsz;
988 1.3.4.2 mjf wl->wl_bcount += bp->b_bcount - oldcnt;
989 1.3.4.2 mjf mutex_exit(&wl->wl_mtx);
990 1.3.4.2 mjf }
991 1.3.4.2 mjf }
992 1.3.4.2 mjf
993 1.3.4.2 mjf #endif /* _KERNEL */
994 1.3.4.2 mjf
995 1.3.4.2 mjf /****************************************************************/
996 1.3.4.2 mjf /* Some utility inlines */
997 1.3.4.2 mjf
/*
 * Advance a circular-log pointer by delta bytes.  The usable region is
 * [off, off + size); the value 0 is reserved to mean "empty/unset", so
 * a valid non-zero pointer always lies within the region and advancing
 * past the end wraps back around by size.  Returns the new pointer.
 */
static __inline off_t
wapbl_advance(size_t size, size_t off, off_t old, size_t delta)
{
	off_t new;

	/* Define acceptable ranges for inputs. */
	KASSERT(delta <= size);
	KASSERT((old == 0) || (old >= off));
	KASSERT(old < (size + off));

	if (old == 0) {
		/* Empty log: a zero advance stays empty, otherwise land
		 * delta bytes past the start of the region. */
		new = (delta == 0) ? 0 : off + delta;
	} else {
		new = old + delta;
		if (new >= (off_t)(size + off))
			new -= size;	/* wrapped past the end */
	}

	/* Note some interesting axioms */
	KASSERT((delta != 0) || (new == old));
	KASSERT((delta == 0) || (new != 0));
	KASSERT((delta != (size)) || (new == old));

	/* Define acceptable ranges for output. */
	KASSERT((new == 0) || (new >= off));
	KASSERT(new < (size + off));
	return new;
}
1026 1.3.4.2 mjf
/*
 * Number of bytes in use in a circular log of avail bytes with head
 * and tail pointers as produced by wapbl_advance().  A zero tail means
 * the log is empty (head must then be zero too).
 */
static __inline size_t
wapbl_space_used(size_t avail, off_t head, off_t tail)
{

	if (tail != 0)
		return ((head + (avail - 1) - tail) % avail) + 1;

	/* Empty log. */
	KASSERT(head == 0);
	return 0;
}
1037 1.3.4.2 mjf
/*
 * Number of free bytes remaining in a circular log of avail bytes with
 * the given head and tail pointers.
 */
static __inline size_t
wapbl_space_free(size_t avail, off_t head, off_t tail)
{
	size_t used = wapbl_space_used(avail, head, tail);

	return avail - used;
}
1044 1.3.4.2 mjf
/*
 * Advance the head pointer of a circular log by delta bytes; if the
 * log was empty (tail == 0) and now contains data, initialize the tail
 * to the start of the region.  Updates *headp and *tailp in place.
 */
static __inline void
wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t nhead = *headp;
	off_t ntail = *tailp;

	/* Never consume more than is free. */
	KASSERT(delta <= wapbl_space_free(size, nhead, ntail));
	nhead = wapbl_advance(size, off, nhead, delta);
	if ((ntail == 0) && (nhead != 0))
		ntail = off;
	*headp = nhead;
	*tailp = ntail;
}
1059 1.3.4.2 mjf
/*
 * Advance the tail pointer of a circular log by delta bytes, i.e.
 * reclaim that much space; when the tail catches up with the head the
 * log is empty and both pointers are reset to the reserved value 0.
 * Updates *headp and *tailp in place.
 */
static __inline void
wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
		   off_t *tailp)
{
	off_t nhead = *headp;
	off_t ntail = *tailp;

	/* Never reclaim more than is in use. */
	KASSERT(delta <= wapbl_space_used(size, nhead, ntail));
	ntail = wapbl_advance(size, off, ntail, delta);
	if (nhead == ntail)
		nhead = ntail = 0;
	*headp = nhead;
	*tailp = ntail;
}
1075 1.3.4.2 mjf
1076 1.3.4.2 mjf #ifdef _KERNEL
1077 1.3.4.2 mjf
1078 1.3.4.2 mjf /****************************************************************/
1079 1.3.4.2 mjf
/*
 * Remove transactions whose buffers are completely flushed to disk.
 * Will block until at least minfree space is available.
 * Only intended to be called from inside wapbl_flush and therefore
 * does not protect against commit races with itself or with flush.
 */
static int
wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly)
{
	size_t delta;
	size_t avail;
	off_t head;
	off_t tail;
	int error = 0;

	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
	KASSERT(rw_write_held(&wl->wl_rwlock));

	mutex_enter(&wl->wl_mtx);

	/*
	 * First check to see if we have to do a commit
	 * at all.
	 */
	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
	if (minfree < avail) {
		mutex_exit(&wl->wl_mtx);
		return 0;
	}
	/* Only the shortfall beyond what is already free must be reclaimed. */
	minfree -= avail;
	/* Wait until enough flushed-transaction bytes become reclaimable,
	 * or an I/O error is posted on the log. */
	while ((wl->wl_error_count == 0) &&
	    (wl->wl_reclaimable_bytes < minfree)) {
		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
		    ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd "
		    "minfree=%zd\n",
		    &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes,
		    minfree));

		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
	}
	if (wl->wl_reclaimable_bytes < minfree) {
		KASSERT(wl->wl_error_count);
		/* XXX maybe get actual error from buffer instead someday? */
		error = EIO;
	}
	head = wl->wl_head;
	tail = wl->wl_tail;
	delta = wl->wl_reclaimable_bytes;

	/* If all of the entries are flushed, then be sure to keep
	 * the reserved bytes reserved. Watch out for discarded transactions,
	 * which could leave more bytes reserved than are reclaimable.
	 */
	if (SIMPLEQ_EMPTY(&wl->wl_entries) &&
	    (delta >= wl->wl_reserved_bytes)) {
		delta -= wl->wl_reserved_bytes;
	}
	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
	    &tail);
	KDASSERT(wl->wl_reserved_bytes <=
	    wapbl_space_used(wl->wl_circ_size, head, tail));
	mutex_exit(&wl->wl_mtx);

	if (error)
		return error;

	/* waitonly callers only wanted the sleep above, not a new commit. */
	if (waitonly)
		return 0;

	/*
	 * This is where head, tail and delta are unprotected
	 * from races against itself or flush. This is ok since
	 * we only call this routine from inside flush itself.
	 *
	 * XXX: how can it race against itself when accessed only
	 * from behind the write-locked rwlock?
	 */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		return error;

	wl->wl_head = head;
	wl->wl_tail = tail;

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	mutex_exit(&wl->wl_mtx);
	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
	    curproc->p_pid, curlwp->l_lid, delta));

	return 0;
}
1174 1.3.4.2 mjf
1175 1.3.4.2 mjf /****************************************************************/
1176 1.3.4.2 mjf
/*
 * I/O completion callback for buffers that were flushed as part of a
 * journal transaction (set as b_iodone in wapbl_flush).  Drops the
 * buffer's reference on its wapbl_entry and, once the oldest entries
 * have no outstanding buffers, makes their log space reclaimable and
 * wakes anyone waiting in wapbl_truncate().
 */
void
wapbl_biodone(struct buf *bp)
{
	struct wapbl_entry *we = bp->b_private;
	struct wapbl *wl = we->we_wapbl;

	/*
	 * Handle possible flushing of buffers after log has been
	 * decommissioned.
	 */
	if (!wl) {
		KASSERT(we->we_bufcount > 0);
		we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
		KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
		we->we_unsynced_bufbytes -= bp->b_bufsize;
#endif

		/* Last buffer of an orphaned entry: free the entry itself. */
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			wapbl_free(we);
		}

		brelse(bp, 0);
		return;
	}

#ifdef ohbother
	KDASSERT(bp->b_flags & B_DONE);
	KDASSERT(!(bp->b_flags & B_DELWRI));
	KDASSERT(bp->b_flags & B_ASYNC);
	KDASSERT(bp->b_flags & B_BUSY);
	KDASSERT(!(bp->b_flags & B_LOCKED));
	KDASSERT(!(bp->b_flags & B_READ));
	KDASSERT(!(bp->b_flags & B_INVAL));
	KDASSERT(!(bp->b_flags & B_NOCACHE));
#endif

	if (bp->b_error) {
#ifdef notyet /* Can't currently handle possible dirty buffer reuse */
		XXXpooka: interfaces not fully updated
		Note: this was not enabled in the original patch
		against netbsd4 either.  I don't know if comment
		above is true or not.

		/*
		 * If an error occurs, report the error and leave the
		 * buffer as a delayed write on the LRU queue.
		 * restarting the write would likely result in
		 * an error spinloop, so let it be done harmlessly
		 * by the syncer.
		 */
		bp->b_flags &= ~(B_DONE);
		simple_unlock(&bp->b_interlock);

		if (we->we_error == 0) {
			mutex_enter(&wl->wl_mtx);
			wl->wl_error_count++;
			mutex_exit(&wl->wl_mtx);
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		we->we_error = bp->b_error;
		bp->b_error = 0;
		brelse(bp);
		return;
#else
		/* For now, just mark the log permanently errored out */

		mutex_enter(&wl->wl_mtx);
		if (wl->wl_error_count == 0) {
			wl->wl_error_count++;
			/* Unblock wapbl_truncate() so it can return EIO. */
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
		mutex_exit(&wl->wl_mtx);
#endif
	}

	mutex_enter(&wl->wl_mtx);

	KASSERT(we->we_bufcount > 0);
	we->we_bufcount--;
#ifdef WAPBL_DEBUG_BUFBYTES
	KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize);
	we->we_unsynced_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize);
	wl->wl_unsynced_bufbytes -= bp->b_bufsize;
#endif

	/*
	 * If the current transaction can be reclaimed, start
	 * at the beginning and reclaim any consecutive reclaimable
	 * transactions.  If we successfully reclaim anything,
	 * then wakeup anyone waiting for the reclaim.
	 */
	if (we->we_bufcount == 0) {
		size_t delta = 0;
		int errcnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
		KDASSERT(we->we_unsynced_bufbytes == 0);
#endif
		/*
		 * clear any posted error, since the buffer it came from
		 * has successfully flushed by now
		 */
		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
		       (we->we_bufcount == 0)) {
			delta += we->we_reclaimable_bytes;
			if (we->we_error)
				errcnt++;
			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
			wapbl_free(we);
		}

		if (delta) {
			wl->wl_reclaimable_bytes += delta;
			KASSERT(wl->wl_error_count >= errcnt);
			wl->wl_error_count -= errcnt;
			cv_broadcast(&wl->wl_reclaimable_cv);
		}
	}

	mutex_exit(&wl->wl_mtx);
	brelse(bp, 0);
}
1303 1.3.4.2 mjf
/*
 * Write the current transaction to the on-disk log and start async I/O
 * for the transaction's buffers.  If waitfor is nonzero, additionally
 * block until everything is flushed and the on-disk log is empty.
 * Returns 0 on success or an I/O error; on error, pending deallocation
 * records are handed to the wl_flush_abort callback.
 */
int
wapbl_flush(struct wapbl *wl, int waitfor)
{
	struct buf *bp;
	struct wapbl_entry *we;
	off_t off;
	off_t head;
	off_t tail;
	size_t delta = 0;
	size_t flushsize;
	size_t reserved;
	int error = 0;

	/*
	 * Do a quick check to see if a full flush can be skipped
	 * This assumes that the flush callback does not need to be called
	 * unless there are other outstanding bufs.
	 */
	if (!waitfor) {
		size_t nbufs;
		mutex_enter(&wl->wl_mtx);	/* XXX need mutex here to
						   protect the KASSERTS */
		nbufs = wl->wl_bufcount;
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
		mutex_exit(&wl->wl_mtx);
		if (nbufs == 0)
			return 0;
	}

	/*
	 * XXX we may consider using LK_UPGRADE here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	/* Let the fs process its pending block deallocations first. */
	wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens,
	    wl->wl_dealloccnt);

	/*
	 * Now that we are fully locked and flushed,
	 * do another check for nothing to do.
	 */
	if (wl->wl_bufcount == 0) {
		goto out;
	}

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d flushing entries with "
	    "bufcount=%zu bufbytes=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes));
#endif

	/* Calculate amount of space needed to flush */
	flushsize = wapbl_transaction_len(wl);

	if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
		/*
		 * XXX this could be handled more gracefully, perhaps place
		 * only a partial transaction in the log and allow the
		 * remaining to flush without the protection of the journal.
		 */
		panic("wapbl_flush: current transaction too big to flush\n");
	}

	/* Reclaim enough log space for the whole transaction. */
	error = wapbl_truncate(wl, flushsize, 0);
	if (error)
		goto out2;

	/* Serialize the transaction into the log starting at the head. */
	off = wl->wl_head;
	KASSERT((off == 0) || ((off >= wl->wl_circ_off) &&
	        (off < wl->wl_circ_off + wl->wl_circ_size)));
	error = wapbl_write_blocks(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_revocations(wl, &off);
	if (error)
		goto out2;
	error = wapbl_write_inodes(wl, &off);
	if (error)
		goto out2;

	/* Keep room for the inode records in future commits. */
	reserved = 0;
	if (wl->wl_inohashcnt)
		reserved = wapbl_transaction_inodes_len(wl);

	head = wl->wl_head;
	tail = wl->wl_tail;

	wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
	    &head, &tail);
#ifdef WAPBL_DEBUG
	if (head != off) {
		panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX
		      " off=%"PRIdMAX" flush=%zu\n",
		      (intmax_t)head, (intmax_t)tail, (intmax_t)off,
		      flushsize);
	}
#else
	KASSERT(head == off);
#endif

	/* Opportunistically move the tail forward if we can */
	if (!wapbl_lazy_truncate) {
		mutex_enter(&wl->wl_mtx);
		delta = wl->wl_reclaimable_bytes;
		mutex_exit(&wl->wl_mtx);
		wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
		    &head, &tail);
	}

	/* Commit record makes the transaction durable. */
	error = wapbl_write_commit(wl, head, tail);
	if (error)
		goto out2;

	/* poolme? or kmemme? */
	we = wapbl_calloc(1, sizeof(*we));

#ifdef WAPBL_DEBUG_BUFBYTES
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
	     " unsynced=%zu"
	     "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
	     "inodes=%d\n",
	     curproc->p_pid, curlwp->l_lid, flushsize, delta,
	     wapbl_space_used(wl->wl_circ_size, head, tail),
	     wl->wl_unsynced_bufbytes, wl->wl_bufcount,
	     wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
	     wl->wl_inohashcnt));
#else
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
	     "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
	     "inodes=%d\n",
	     curproc->p_pid, curlwp->l_lid, flushsize, delta,
	     wapbl_space_used(wl->wl_circ_size, head, tail),
	     wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
	     wl->wl_dealloccnt, wl->wl_inohashcnt));
#endif


	/* Publish the new log state and set up the in-core entry that
	 * tracks this committed transaction's outstanding buffers. */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);

	wl->wl_reserved_bytes = reserved;
	wl->wl_head = head;
	wl->wl_tail = tail;
	KASSERT(wl->wl_reclaimable_bytes >= delta);
	wl->wl_reclaimable_bytes -= delta;
	wl->wl_dealloccnt = 0;
#ifdef WAPBL_DEBUG_BUFBYTES
	wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
#endif

	we->we_wapbl = wl;
	we->we_bufcount = wl->wl_bufcount;
#ifdef WAPBL_DEBUG_BUFBYTES
	we->we_unsynced_bufbytes = wl->wl_bufbytes;
#endif
	we->we_reclaimable_bytes = flushsize;
	we->we_error = 0;
	SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);

	/*
	 * this flushes bufs in reverse order than they were queued
	 * it shouldn't matter, but if we care we could use TAILQ instead.
	 * XXX Note they will get put on the lru queue when they flush
	 * so we might actually want to change this to preserve order.
	 */
	while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
			/* bbusy slept and dropped wl_mtx; re-fetch head. */
			continue;
		}
		bp->b_iodone = wapbl_biodone;
		bp->b_private = we;
		bremfree(bp);
		wapbl_remove_buf_locked(wl, bp);
		mutex_exit(&wl->wl_mtx);
		mutex_exit(&bufcache_lock);
		bawrite(bp);
		mutex_enter(&bufcache_lock);
		mutex_enter(&wl->wl_mtx);
	}
	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

#if 0
	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
	    ("wapbl_flush thread %d.%d done flushing entries...\n",
	    curproc->p_pid, curlwp->l_lid));
#endif

 out:

	/*
	 * If the waitfor flag is set, don't return until everything is
	 * fully flushed and the on disk log is empty.
	 */
	if (waitfor) {
		error = wapbl_truncate(wl, wl->wl_circ_size -
		    wl->wl_reserved_bytes, wapbl_lazy_truncate);
	}

 out2:
	if (error) {
		wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks,
		    wl->wl_dealloclens, wl->wl_dealloccnt);
	}

#ifdef WAPBL_DEBUG_PRINT
	if (error) {
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
		mutex_enter(&wl->wl_mtx);
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_flush: thread %d.%d aborted flush: "
		    "error = %d\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
		    pid, lid, error, wl->wl_bufcount,
		    wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n", we->we_bufcount,
			    we->we_reclaimable_bytes, we->we_error));
		}
#endif
		mutex_exit(&wl->wl_mtx);
	}
#endif

	rw_exit(&wl->wl_rwlock);
	return error;
}
1572 1.3.4.2 mjf
1573 1.3.4.2 mjf /****************************************************************/
1574 1.3.4.2 mjf
/*
 * wapbl_jlock_assert: assert that the caller holds the journal lock.
 *
 * With WAPBL_DEBUG_SERIALIZE the journal lock must be held exclusively
 * (write-held); otherwise shared (read) ownership of wl_rwlock also
 * counts as holding the journal lock.
 */
void
wapbl_jlock_assert(struct wapbl *wl)
{

#ifdef WAPBL_DEBUG_SERIALIZE
	KASSERT(rw_write_held(&wl->wl_rwlock));
#else
	KASSERT(rw_read_held(&wl->wl_rwlock) || rw_write_held(&wl->wl_rwlock));
#endif
}
1585 1.3.4.2 mjf
/*
 * wapbl_junlock_assert: assert that the caller does NOT hold the
 * journal lock.  Only enforced under WAPBL_DEBUG_SERIALIZE; in the
 * shared-lock configuration no check is made.
 */
void
wapbl_junlock_assert(struct wapbl *wl)
{

#ifdef WAPBL_DEBUG_SERIALIZE
	KASSERT(!rw_write_held(&wl->wl_rwlock));
#endif
}
1594 1.3.4.2 mjf
1595 1.3.4.2 mjf /****************************************************************/
1596 1.3.4.2 mjf
1597 1.3.4.2 mjf /* locks missing */
1598 1.3.4.2 mjf void
1599 1.3.4.2 mjf wapbl_print(struct wapbl *wl,
1600 1.3.4.2 mjf int full,
1601 1.3.4.2 mjf void (*pr)(const char *, ...))
1602 1.3.4.2 mjf {
1603 1.3.4.2 mjf struct buf *bp;
1604 1.3.4.2 mjf struct wapbl_entry *we;
1605 1.3.4.2 mjf (*pr)("wapbl %p", wl);
1606 1.3.4.2 mjf (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
1607 1.3.4.2 mjf wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
1608 1.3.4.2 mjf (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n",
1609 1.3.4.2 mjf wl->wl_circ_size, wl->wl_circ_off,
1610 1.3.4.2 mjf (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
1611 1.3.4.2 mjf (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
1612 1.3.4.2 mjf wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
1613 1.3.4.2 mjf #ifdef WAPBL_DEBUG_BUFBYTES
1614 1.3.4.2 mjf (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1615 1.3.4.2 mjf "reserved = %zu errcnt = %d unsynced = %zu\n",
1616 1.3.4.2 mjf wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1617 1.3.4.2 mjf wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1618 1.3.4.2 mjf wl->wl_error_count, wl->wl_unsynced_bufbytes);
1619 1.3.4.2 mjf #else
1620 1.3.4.2 mjf (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
1621 1.3.4.2 mjf "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
1622 1.3.4.2 mjf wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
1623 1.3.4.2 mjf wl->wl_error_count);
1624 1.3.4.2 mjf #endif
1625 1.3.4.2 mjf (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
1626 1.3.4.2 mjf wl->wl_dealloccnt, wl->wl_dealloclim);
1627 1.3.4.2 mjf (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
1628 1.3.4.2 mjf wl->wl_inohashcnt, wl->wl_inohashmask);
1629 1.3.4.2 mjf (*pr)("entries:\n");
1630 1.3.4.2 mjf SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1631 1.3.4.2 mjf #ifdef WAPBL_DEBUG_BUFBYTES
1632 1.3.4.2 mjf (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
1633 1.3.4.2 mjf "unsynced = %zu\n",
1634 1.3.4.2 mjf we->we_bufcount, we->we_reclaimable_bytes,
1635 1.3.4.2 mjf we->we_error, we->we_unsynced_bufbytes);
1636 1.3.4.2 mjf #else
1637 1.3.4.2 mjf (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
1638 1.3.4.2 mjf we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
1639 1.3.4.2 mjf #endif
1640 1.3.4.2 mjf }
1641 1.3.4.2 mjf if (full) {
1642 1.3.4.2 mjf int cnt = 0;
1643 1.3.4.2 mjf (*pr)("bufs =");
1644 1.3.4.2 mjf LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
1645 1.3.4.2 mjf if (!LIST_NEXT(bp, b_wapbllist)) {
1646 1.3.4.2 mjf (*pr)(" %p", bp);
1647 1.3.4.2 mjf } else if ((++cnt % 6) == 0) {
1648 1.3.4.2 mjf (*pr)(" %p,\n\t", bp);
1649 1.3.4.2 mjf } else {
1650 1.3.4.2 mjf (*pr)(" %p,", bp);
1651 1.3.4.2 mjf }
1652 1.3.4.2 mjf }
1653 1.3.4.2 mjf (*pr)("\n");
1654 1.3.4.2 mjf
1655 1.3.4.2 mjf (*pr)("dealloced blks = ");
1656 1.3.4.2 mjf {
1657 1.3.4.2 mjf int i;
1658 1.3.4.2 mjf cnt = 0;
1659 1.3.4.2 mjf for (i = 0; i < wl->wl_dealloccnt; i++) {
1660 1.3.4.2 mjf (*pr)(" %"PRId64":%d,",
1661 1.3.4.2 mjf wl->wl_deallocblks[i],
1662 1.3.4.2 mjf wl->wl_dealloclens[i]);
1663 1.3.4.2 mjf if ((++cnt % 4) == 0) {
1664 1.3.4.2 mjf (*pr)("\n\t");
1665 1.3.4.2 mjf }
1666 1.3.4.2 mjf }
1667 1.3.4.2 mjf }
1668 1.3.4.2 mjf (*pr)("\n");
1669 1.3.4.2 mjf
1670 1.3.4.2 mjf (*pr)("registered inodes = ");
1671 1.3.4.2 mjf {
1672 1.3.4.2 mjf int i;
1673 1.3.4.2 mjf cnt = 0;
1674 1.3.4.2 mjf for (i = 0; i <= wl->wl_inohashmask; i++) {
1675 1.3.4.2 mjf struct wapbl_ino_head *wih;
1676 1.3.4.2 mjf struct wapbl_ino *wi;
1677 1.3.4.2 mjf
1678 1.3.4.2 mjf wih = &wl->wl_inohash[i];
1679 1.3.4.2 mjf LIST_FOREACH(wi, wih, wi_hash) {
1680 1.3.4.2 mjf if (wi->wi_ino == 0)
1681 1.3.4.2 mjf continue;
1682 1.3.4.2 mjf (*pr)(" %"PRId32"/0%06"PRIo32",",
1683 1.3.4.2 mjf wi->wi_ino, wi->wi_mode);
1684 1.3.4.2 mjf if ((++cnt % 4) == 0) {
1685 1.3.4.2 mjf (*pr)("\n\t");
1686 1.3.4.2 mjf }
1687 1.3.4.2 mjf }
1688 1.3.4.2 mjf }
1689 1.3.4.2 mjf (*pr)("\n");
1690 1.3.4.2 mjf }
1691 1.3.4.2 mjf }
1692 1.3.4.2 mjf }
1693 1.3.4.2 mjf
#if defined(WAPBL_DEBUG) || defined(DDB)
/*
 * wapbl_dump: debugger entry point - print the full state of journal
 * wl via printf.  With WAPBL_DEBUG, a NULL argument selects the
 * globally registered wapbl_debug_wl instance; if no journal can be
 * found this is a no-op.
 */
void
wapbl_dump(struct wapbl *wl)
{
#if defined(WAPBL_DEBUG)
	if (!wl)
		wl = wapbl_debug_wl;
#endif
	if (!wl)
		return;
	wapbl_print(wl, 1, printf);
}
#endif
1707 1.3.4.2 mjf
1708 1.3.4.2 mjf /****************************************************************/
1709 1.3.4.2 mjf
/*
 * wapbl_register_deallocation: record a pending deallocation of len
 * bytes at device block blk in the journal's deallocation arrays, so
 * that wapbl_write_revocations() can emit it with the next commit.
 * The journal lock must be held; panics (KASSERT) if the fixed-size
 * deallocation table is full.
 */
void
wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len)
{

	wapbl_jlock_assert(wl);

	/* XXX should eventually instead tie this into resource estimation */
	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_dealloccnt < wl->wl_dealloclim);
	wl->wl_deallocblks[wl->wl_dealloccnt] = blk;
	wl->wl_dealloclens[wl->wl_dealloccnt] = len;
	wl->wl_dealloccnt++;
	WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
	    ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len));
}
1725 1.3.4.2 mjf
1726 1.3.4.2 mjf /****************************************************************/
1727 1.3.4.2 mjf
/*
 * wapbl_inodetrk_init: allocate this journal's inode-tracking hash
 * (size buckets) and, on the first call across all journals, create
 * the shared wapbl_ino entry pool.  The pool is refcounted via
 * wapbl_ino_pool_refcount and torn down in wapbl_inodetrk_free().
 */
static void
wapbl_inodetrk_init(struct wapbl *wl, u_int size)
{

	wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
	if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
		pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
		    "wapblinopl", &pool_allocator_nointr, IPL_NONE);
	}
}
1738 1.3.4.2 mjf
/*
 * wapbl_inodetrk_free: release the inode-tracking hash created by
 * wapbl_inodetrk_init(); the table must already be empty.  Destroys
 * the shared wapbl_ino pool when the last journal drops its reference.
 */
static void
wapbl_inodetrk_free(struct wapbl *wl)
{

	/* XXX this KASSERT needs locking/mutex analysis */
	KASSERT(wl->wl_inohashcnt == 0);
	hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
	if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
		pool_destroy(&wapbl_ino_pool);
	}
}
1750 1.3.4.2 mjf
1751 1.3.4.2 mjf static struct wapbl_ino *
1752 1.3.4.2 mjf wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
1753 1.3.4.2 mjf {
1754 1.3.4.2 mjf struct wapbl_ino_head *wih;
1755 1.3.4.2 mjf struct wapbl_ino *wi;
1756 1.3.4.2 mjf
1757 1.3.4.2 mjf KASSERT(mutex_owned(&wl->wl_mtx));
1758 1.3.4.2 mjf
1759 1.3.4.2 mjf wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
1760 1.3.4.2 mjf LIST_FOREACH(wi, wih, wi_hash) {
1761 1.3.4.2 mjf if (ino == wi->wi_ino)
1762 1.3.4.2 mjf return wi;
1763 1.3.4.2 mjf }
1764 1.3.4.2 mjf return 0;
1765 1.3.4.2 mjf }
1766 1.3.4.2 mjf
/*
 * wapbl_register_inode: add inode ino (with mode) to the journal's
 * inode-tracking hash so it is included in each transaction by
 * wapbl_write_inodes().  No-op if the inode is already registered.
 */
void
wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;

	/*
	 * Allocate before taking wl_mtx: pool_get(PR_WAITOK) may sleep,
	 * which is not allowed while holding the mutex.
	 */
	wi = pool_get(&wapbl_ino_pool, PR_WAITOK);

	mutex_enter(&wl->wl_mtx);
	if (wapbl_inodetrk_get(wl, ino) == NULL) {
		wi->wi_ino = ino;
		wi->wi_mode = mode;
		wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
		LIST_INSERT_HEAD(wih, wi, wi_hash);
		wl->wl_inohashcnt++;
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_register_inode: ino=%"PRId64"\n", ino));
		mutex_exit(&wl->wl_mtx);
	} else {
		/* Already tracked: give the preallocated entry back. */
		mutex_exit(&wl->wl_mtx);
		pool_put(&wapbl_ino_pool, wi);
	}
}
1790 1.3.4.2 mjf
/*
 * wapbl_unregister_inode: remove inode ino from the inode-tracking
 * hash, if present.  The mode argument is unused here; it mirrors the
 * wapbl_register_inode() interface.
 */
void
wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
{
	struct wapbl_ino *wi;

	mutex_enter(&wl->wl_mtx);
	wi = wapbl_inodetrk_get(wl, ino);
	if (wi) {
		WAPBL_PRINTF(WAPBL_PRINT_INODE,
		    ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
		KASSERT(wl->wl_inohashcnt > 0);
		wl->wl_inohashcnt--;
		LIST_REMOVE(wi, wi_hash);
		mutex_exit(&wl->wl_mtx);

		/* Free outside the mutex. */
		pool_put(&wapbl_ino_pool, wi);
	} else {
		mutex_exit(&wl->wl_mtx);
	}
}
1811 1.3.4.2 mjf
1812 1.3.4.2 mjf /****************************************************************/
1813 1.3.4.2 mjf
1814 1.3.4.2 mjf static __inline size_t
1815 1.3.4.2 mjf wapbl_transaction_inodes_len(struct wapbl *wl)
1816 1.3.4.2 mjf {
1817 1.3.4.2 mjf int blocklen = 1<<wl->wl_log_dev_bshift;
1818 1.3.4.2 mjf int iph;
1819 1.3.4.2 mjf
1820 1.3.4.2 mjf /* Calculate number of inodes described in a inodelist header */
1821 1.3.4.2 mjf iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
1822 1.3.4.2 mjf sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
1823 1.3.4.2 mjf
1824 1.3.4.2 mjf KASSERT(iph > 0);
1825 1.3.4.2 mjf
1826 1.3.4.2 mjf return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen;
1827 1.3.4.2 mjf }
1828 1.3.4.2 mjf
1829 1.3.4.2 mjf
1830 1.3.4.2 mjf /* Calculate amount of space a transaction will take on disk */
1831 1.3.4.2 mjf static size_t
1832 1.3.4.2 mjf wapbl_transaction_len(struct wapbl *wl)
1833 1.3.4.2 mjf {
1834 1.3.4.2 mjf int blocklen = 1<<wl->wl_log_dev_bshift;
1835 1.3.4.2 mjf size_t len;
1836 1.3.4.2 mjf int bph;
1837 1.3.4.2 mjf
1838 1.3.4.2 mjf /* Calculate number of blocks described in a blocklist header */
1839 1.3.4.2 mjf bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
1840 1.3.4.2 mjf sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
1841 1.3.4.2 mjf
1842 1.3.4.2 mjf KASSERT(bph > 0);
1843 1.3.4.2 mjf
1844 1.3.4.2 mjf len = wl->wl_bcount;
1845 1.3.4.2 mjf len += howmany(wl->wl_bufcount, bph)*blocklen;
1846 1.3.4.2 mjf len += howmany(wl->wl_dealloccnt, bph)*blocklen;
1847 1.3.4.2 mjf len += wapbl_transaction_inodes_len(wl);
1848 1.3.4.2 mjf
1849 1.3.4.2 mjf return len;
1850 1.3.4.2 mjf }
1851 1.3.4.2 mjf
/*
 * Perform commit operation
 *
 * Note that generation number incrementation needs to
 * be protected against racing with other invocations
 * of wapbl_commit. This is ok since this routine
 * is only invoked from wapbl_flush
 */
static int
wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
{
	struct wapbl_wc_header *wc = wl->wl_wc_header;
	struct timespec ts;
	int error;
	int force = 1;

	/* XXX Calc checksum here, instead we do this for now */
	/*
	 * Flush the device cache first so all previously written log
	 * data is on stable storage before the commit record that
	 * references it.  A cache-sync failure is only logged here.
	 */
	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	/* Fill in the commit header: new head/tail and a timestamp. */
	wc->wc_head = head;
	wc->wc_tail = tail;
	wc->wc_checksum = 0;
	wc->wc_version = 1;
	getnanotime(&ts);
	wc->wc_time = ts.tv_sec;
	wc->wc_timensec = ts.tv_nsec;

	WAPBL_PRINTF(WAPBL_PRINT_WRITE,
	    ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n",
	    (intmax_t)head, (intmax_t)tail));

	/*
	 * XXX if generation will rollover, then first zero
	 * over second commit header before trying to write both headers.
	 */

	/* Alternate between the two on-disk header slots by generation. */
	error = wapbl_write(wc, wc->wc_len, wl->wl_devvp,
	    wl->wl_logpbn + wc->wc_generation % 2);
	if (error)
		return error;

	/* Second cache sync: make the commit record itself durable. */
	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED);
	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%x "
		    "returned %d\n", wl->wl_devvp->v_rdev, error));
	}

	/*
	 * If the generation number was zero, write it out a second time.
	 * This handles initialization and generation number rollover
	 */
	if (wc->wc_generation++ == 0) {
		error = wapbl_write_commit(wl, head, tail);
		/*
		 * This panic should be able to be removed if we do the
		 * zero'ing mentioned above, and we are certain to roll
		 * back generation number on failure.
		 */
		if (error)
			panic("wapbl_write_commit: error writing duplicate "
			    "log header: %d\n", error);
	}
	return 0;
}
1922 1.3.4.2 mjf
/*
 * wapbl_write_blocks: append the buffers on wl->wl_bufs to the circular
 * log at *offp.  Buffers are written in groups of at most bph: first a
 * WAPBL_WC_BLOCKS header block describing the group, then each buffer's
 * data, then zero padding up to the next log-device block boundary.
 * On success *offp is advanced past everything written.
 */
static int
wapbl_write_blocks(struct wapbl *wl, off_t *offp)
{
	struct wapbl_wc_blocklist *wc =
	    (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int bph;
	struct buf *bp;
	off_t off = *offp;
	int error;
	size_t padding;

	KASSERT(rw_write_held(&wl->wl_rwlock));

	/* Number of block entries that fit in one blocklist header. */
	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	bp = LIST_FIRST(&wl->wl_bufs);

	while (bp) {
		int cnt;
		struct buf *obp = bp;	/* group start, for the data pass */

		KASSERT(bp->b_flags & B_LOCKED);

		/* First pass: build the header describing up to bph bufs. */
		wc->wc_type = WAPBL_WC_BLOCKS;
		wc->wc_len = blocklen;
		wc->wc_blkcount = 0;
		while (bp && (wc->wc_blkcount < bph)) {
			/*
			 * Make sure all the physical block numbers are up to
			 * date.  If this is not always true on a given
			 * filesystem, then VOP_BMAP must be called.  We
			 * could call VOP_BMAP here, or else in the filesystem
			 * specific flush callback, although neither of those
			 * solutions allow us to take the vnode lock.  If a
			 * filesystem requires that we must take the vnode lock
			 * to call VOP_BMAP, then we can probably do it in
			 * bwrite when the vnode lock should already be held
			 * by the invoking code.
			 */
			KASSERT((bp->b_vp->v_type == VBLK) ||
			    (bp->b_blkno != bp->b_lblkno));
			KASSERT(bp->b_blkno > 0);

			wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
			wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
			wc->wc_len += bp->b_bcount;
			wc->wc_blkcount++;
			bp = LIST_NEXT(bp, b_wapbllist);
		}
		/* Round the record length up to a whole log block. */
		if (wc->wc_len % blocklen != 0) {
			padding = blocklen - wc->wc_len % blocklen;
			wc->wc_len += padding;
		} else {
			padding = 0;
		}

		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
		    ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
		    wc->wc_len, padding, (intmax_t)off));

		error = wapbl_circ_write(wl, wc, blocklen, &off);
		if (error)
			return error;
		/* Second pass: write the data of the same buffers. */
		bp = obp;
		cnt = 0;
		while (bp && (cnt++ < bph)) {
			error = wapbl_circ_write(wl, bp->b_data,
			    bp->b_bcount, &off);
			if (error)
				return error;
			bp = LIST_NEXT(bp, b_wapbllist);
		}
		/* Pad the record's data out to a block boundary. */
		if (padding) {
			void *zero;

			zero = wapbl_malloc(padding);
			memset(zero, 0, padding);
			error = wapbl_circ_write(wl, zero, padding, &off);
			wapbl_free(zero);
			if (error)
				return error;
		}
	}
	*offp = off;
	return 0;
}
2012 1.3.4.2 mjf
/*
 * wapbl_write_revocations: write the pending deallocations recorded in
 * wl_deallocblks/wl_dealloclens to the circular log at *offp as
 * WAPBL_WC_REVOCATIONS blocklist records (header blocks only, no data).
 * On success *offp is advanced; no-op when there are no deallocations.
 */
static int
wapbl_write_revocations(struct wapbl *wl, off_t *offp)
{
	struct wapbl_wc_blocklist *wc =
	    (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
	int i;
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int bph;
	off_t off = *offp;
	int error;

	if (wl->wl_dealloccnt == 0)
		return 0;

	/* Number of block entries that fit in one blocklist header. */
	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	i = 0;
	while (i < wl->wl_dealloccnt) {
		wc->wc_type = WAPBL_WC_REVOCATIONS;
		wc->wc_len = blocklen;
		wc->wc_blkcount = 0;
		while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
			wc->wc_blocks[wc->wc_blkcount].wc_daddr =
			    wl->wl_deallocblks[i];
			wc->wc_blocks[wc->wc_blkcount].wc_dlen =
			    wl->wl_dealloclens[i];
			wc->wc_blkcount++;
			i++;
		}
		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
		    ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
		    wc->wc_len, (intmax_t)off));
		error = wapbl_circ_write(wl, wc, blocklen, &off);
		if (error)
			return error;
	}
	*offp = off;
	return 0;
}
2053 1.3.4.2 mjf
/*
 * wapbl_write_inodes: write the tracked-inode table to the circular log
 * at *offp as one or more WAPBL_WC_INODES records.  wc_clear is set
 * only on the first record (presumably telling the replay code to start
 * a fresh table - confirm against the replay path).  At least one
 * record is always written, even when no inodes are tracked.  On
 * success *offp is advanced.
 */
static int
wapbl_write_inodes(struct wapbl *wl, off_t *offp)
{
	struct wapbl_wc_inodelist *wc =
	    (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
	int i;
	int blocklen = 1 << wl->wl_log_dev_bshift;
	off_t off = *offp;
	int error;

	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;
	int iph;

	/* Number of inode entries that fit in one inodelist block. */
	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);

	i = 0;
	wih = &wl->wl_inohash[0];
	wi = 0;
	do {
		wc->wc_type = WAPBL_WC_INODES;
		wc->wc_len = blocklen;
		wc->wc_inocnt = 0;
		wc->wc_clear = (i == 0);
		while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
			/* Advance to the next non-empty hash chain. */
			while (!wi) {
				KASSERT((wih - &wl->wl_inohash[0])
				    <= wl->wl_inohashmask);
				wi = LIST_FIRST(wih++);
			}
			wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
			wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
			wc->wc_inocnt++;
			i++;
			wi = LIST_NEXT(wi, wi_hash);
		}
		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
		    ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
		    wc->wc_len, (intmax_t)off));
		error = wapbl_circ_write(wl, wc, blocklen, &off);
		if (error)
			return error;
	} while (i < wl->wl_inohashcnt);

	*offp = off;
	return 0;
}
2102 1.3.4.2 mjf
2103 1.3.4.2 mjf #endif /* _KERNEL */
2104 1.3.4.2 mjf
2105 1.3.4.2 mjf /****************************************************************/
2106 1.3.4.2 mjf
2107 1.3.4.2 mjf #ifdef _KERNEL
2108 1.3.4.2 mjf static struct pool wapbl_blk_pool;
2109 1.3.4.2 mjf static int wapbl_blk_pool_refcount;
2110 1.3.4.2 mjf #endif
/*
 * wapbl_blk: replay-time record mapping a device block to the offset of
 * its newest copy in the log.  Entries live in wr->wr_blkhash, keyed by
 * wb_blk (see wapbl_blkhash_*).
 */
struct wapbl_blk {
	LIST_ENTRY(wapbl_blk) wb_hash;
	daddr_t wb_blk;	/* Device block number */
	off_t wb_off; /* Offset of this block in the log */
};
/* Minimum bucket count for the replay block hash. */
#define WAPBL_BLKPOOL_MIN 83
2117 1.3.4.2 mjf
2118 1.3.4.2 mjf static void
2119 1.3.4.2 mjf wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2120 1.3.4.2 mjf {
2121 1.3.4.2 mjf if (size < WAPBL_BLKPOOL_MIN)
2122 1.3.4.2 mjf size = WAPBL_BLKPOOL_MIN;
2123 1.3.4.2 mjf KASSERT(wr->wr_blkhash == 0);
2124 1.3.4.2 mjf #ifdef _KERNEL
2125 1.3.4.2 mjf wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2126 1.3.4.2 mjf if (atomic_inc_uint_nv(&wapbl_blk_pool_refcount) == 1) {
2127 1.3.4.2 mjf pool_init(&wapbl_blk_pool, sizeof(struct wapbl_blk), 0, 0, 0,
2128 1.3.4.2 mjf "wapblblkpl", &pool_allocator_nointr, IPL_NONE);
2129 1.3.4.2 mjf }
2130 1.3.4.2 mjf #else /* ! _KERNEL */
2131 1.3.4.2 mjf /* Manually implement hashinit */
2132 1.3.4.2 mjf {
2133 1.3.4.2 mjf int i;
2134 1.3.4.2 mjf unsigned long hashsize;
2135 1.3.4.2 mjf for (hashsize = 1; hashsize < size; hashsize <<= 1)
2136 1.3.4.2 mjf continue;
2137 1.3.4.2 mjf wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
2138 1.3.4.2 mjf for (i = 0; i < wr->wr_blkhashmask; i++)
2139 1.3.4.2 mjf LIST_INIT(&wr->wr_blkhash[i]);
2140 1.3.4.2 mjf wr->wr_blkhashmask = hashsize - 1;
2141 1.3.4.2 mjf }
2142 1.3.4.2 mjf #endif /* ! _KERNEL */
2143 1.3.4.2 mjf }
2144 1.3.4.2 mjf
/*
 * wapbl_blkhash_free: release the replay block hash; the table must
 * already be empty (see wapbl_blkhash_clear).  In-kernel, also destroys
 * the shared wapbl_blk pool when the last reference is dropped.
 */
static void
wapbl_blkhash_free(struct wapbl_replay *wr)
{
	KASSERT(wr->wr_blkhashcnt == 0);
#ifdef _KERNEL
	hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
	if (atomic_dec_uint_nv(&wapbl_blk_pool_refcount) == 0) {
		pool_destroy(&wapbl_blk_pool);
	}
#else /* ! _KERNEL */
	wapbl_free(wr->wr_blkhash);
#endif /* ! _KERNEL */
}
2158 1.3.4.2 mjf
2159 1.3.4.2 mjf static struct wapbl_blk *
2160 1.3.4.2 mjf wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2161 1.3.4.2 mjf {
2162 1.3.4.2 mjf struct wapbl_blk_head *wbh;
2163 1.3.4.2 mjf struct wapbl_blk *wb;
2164 1.3.4.2 mjf wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2165 1.3.4.2 mjf LIST_FOREACH(wb, wbh, wb_hash) {
2166 1.3.4.2 mjf if (blk == wb->wb_blk)
2167 1.3.4.2 mjf return wb;
2168 1.3.4.2 mjf }
2169 1.3.4.2 mjf return 0;
2170 1.3.4.2 mjf }
2171 1.3.4.2 mjf
/*
 * wapbl_blkhash_ins: record that the newest copy of device block blk
 * lives at log offset off.  An existing entry is updated in place
 * (later log records supersede earlier ones); otherwise a new entry is
 * allocated and inserted.
 */
static void
wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
{
	struct wapbl_blk_head *wbh;
	struct wapbl_blk *wb;
	wb = wapbl_blkhash_get(wr, blk);
	if (wb) {
		KASSERT(wb->wb_blk == blk);
		wb->wb_off = off;
	} else {
#ifdef _KERNEL
		wb = pool_get(&wapbl_blk_pool, PR_WAITOK);
#else /* ! _KERNEL */
		wb = wapbl_malloc(sizeof(*wb));
#endif /* ! _KERNEL */
		wb->wb_blk = blk;
		wb->wb_off = off;
		wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
		LIST_INSERT_HEAD(wbh, wb, wb_hash);
		wr->wr_blkhashcnt++;
	}
}
2194 1.3.4.2 mjf
/*
 * wapbl_blkhash_rem: forget the logged copy of device block blk, if
 * any, and free its entry.
 */
static void
wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
{
	struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
	if (wb) {
		KASSERT(wr->wr_blkhashcnt > 0);
		wr->wr_blkhashcnt--;
		LIST_REMOVE(wb, wb_hash);
#ifdef _KERNEL
		pool_put(&wapbl_blk_pool, wb);
#else /* ! _KERNEL */
		wapbl_free(wb);
#endif /* ! _KERNEL */
	}
}
2210 1.3.4.2 mjf
/*
 * wapbl_blkhash_clear: empty the replay block hash, freeing every
 * entry in every bucket.  The table itself remains allocated.
 */
static void
wapbl_blkhash_clear(struct wapbl_replay *wr)
{
	int i;
	for (i = 0; i <= wr->wr_blkhashmask; i++) {
		struct wapbl_blk *wb;

		while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
			KASSERT(wr->wr_blkhashcnt > 0);
			wr->wr_blkhashcnt--;
			LIST_REMOVE(wb, wb_hash);
#ifdef _KERNEL
			pool_put(&wapbl_blk_pool, wb);
#else /* ! _KERNEL */
			wapbl_free(wb);
#endif /* ! _KERNEL */
		}
	}
	KASSERT(wr->wr_blkhashcnt == 0);
}
2231 1.3.4.2 mjf
2232 1.3.4.2 mjf /****************************************************************/
2233 1.3.4.2 mjf
/*
 * wapbl_circ_read: read len bytes at offset *offp of the circular log
 * region on wr->wr_devvp into data, splitting the read in two when it
 * wraps past the end of the region, and advance *offp past the bytes
 * read (wrapping it back to wr_circ_off as needed).  len must be a
 * multiple of the log device block size.
 */
static int
wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;

	KASSERT(((len >> wr->wr_log_dev_bshift) <<
	    wr->wr_log_dev_bshift) == len);
	if (off < wr->wr_circ_off)
		off = wr->wr_circ_off;
	/* Bytes remaining before the end of the circular region. */
	slen = wr->wr_circ_off + wr->wr_circ_size - off;
	if (slen < len) {
		/* Wraps: read the tail of the region first. */
		error = wapbl_read(data, slen, wr->wr_devvp,
		    wr->wr_logpbn + (off >> wr->wr_log_dev_bshift));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wr->wr_circ_off;
	}
	error = wapbl_read(data, len, wr->wr_devvp,
	    wr->wr_logpbn + (off >> wr->wr_log_dev_bshift));
	if (error)
		return error;
	off += len;
	if (off >= wr->wr_circ_off + wr->wr_circ_size)
		off = wr->wr_circ_off;
	*offp = off;
	return 0;
}
2265 1.3.4.2 mjf
/*
 * wapbl_circ_advance: advance *offp by len bytes within the circular
 * log region, wrapping back to wr_circ_off as needed, without doing
 * any I/O.  len must be a multiple of the log device block size.
 */
static void
wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;

	KASSERT(((len >> wr->wr_log_dev_bshift) <<
	    wr->wr_log_dev_bshift) == len);

	if (off < wr->wr_circ_off)
		off = wr->wr_circ_off;
	/* Bytes remaining before the end of the circular region. */
	slen = wr->wr_circ_off + wr->wr_circ_size - off;
	if (slen < len) {
		len -= slen;
		off = wr->wr_circ_off;
	}
	off += len;
	if (off >= wr->wr_circ_off + wr->wr_circ_size)
		off = wr->wr_circ_off;
	*offp = off;
}
2287 1.3.4.2 mjf
2288 1.3.4.2 mjf /****************************************************************/
2289 1.3.4.2 mjf
/*
 * Open the journal located at block 'off' (in 'blksize'-byte units,
 * 'count' blocks long) on vnode 'vp' and build the in-memory replay
 * state: read the two copies of the on-disk log header, pick the one
 * with the newer generation, and scan the log's records into the
 * block hashtable.
 *
 * On success stores the new state in *wrp and returns 0.  Returns
 * EINVAL for bad geometry, EFTYPE for an unrecognized header, or an
 * I/O error; *wrp is untouched on failure.
 */
int
wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize)
{
	struct wapbl_replay *wr;
	int error;
	struct vnode *devvp;
	daddr_t logpbn;
	uint8_t *scratch;
	struct wapbl_wc_header *wch;
	struct wapbl_wc_header *wch2;
	/* Use this until we read the actual log header */
	int log_dev_bshift = DEV_BSHIFT;
	size_t used;

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
	    vp, off, count, blksize));

	if (off < 0)
		return EINVAL;

	/* The log block size must be a whole multiple of DEV_BSIZE. */
	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

#ifdef _KERNEL
#if 0
	/* XXX vp->v_size isn't reliably set for VBLK devices,
	 * especially root. However, we might still want to verify
	 * that the full load is readable */
	if ((off + count) * blksize > vp->v_size)
		return EINVAL;
#endif

	/* Translate the file-relative log block to a physical block. */
	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
		return error;
	}
#else /* ! _KERNEL */
	/* Userland: the caller hands us the device and block directly. */
	devvp = vp;
	logpbn = off;
#endif /* ! _KERNEL */

	scratch = wapbl_malloc(MAXBSIZE);

	/* Read both header copies (two log-device blocks) in one go. */
	error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
	if (error)
		goto errout;

	wch = (struct wapbl_wc_header *)scratch;
	wch2 =
	    (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
	/* XXX verify checksums and magic numbers */
	if (wch->wc_type != WAPBL_WC_HEADER) {
		printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
		error = EFTYPE;
		goto errout;
	}

	/*
	 * Header copies are written alternately; trust the one with the
	 * higher generation.  NOTE(review): only the first copy's magic
	 * is validated above — confirm wch2 is trusted by design.
	 */
	if (wch2->wc_generation > wch->wc_generation)
		wch = wch2;

	wr = wapbl_calloc(1, sizeof(*wr));

	wr->wr_logvp = vp;
	wr->wr_devvp = devvp;
	wr->wr_logpbn = logpbn;

	/* The header scratch buffer is reused for record reads. */
	wr->wr_scratch = scratch;

	/* Cache the geometry needed to walk the circular log region. */
	wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
	wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
	wr->wr_circ_off = wch->wc_circ_off;
	wr->wr_circ_size = wch->wc_circ_size;
	wr->wr_generation = wch->wc_generation;

	used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
	    " len=%"PRId64" used=%zu\n",
	    wch->wc_head, wch->wc_tail, wch->wc_circ_off,
	    wch->wc_circ_size, used));

	/* Size the hashtable from the number of journalled fs blocks. */
	wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));

	/* Scan the log tail-to-head, populating the block hashtable. */
	error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
	if (error) {
		wapbl_replay_stop(wr);
		wapbl_replay_free(wr);
		return error;
	}

	*wrp = wr;
	return 0;

 errout:
	wapbl_free(scratch);
	return error;
}
2391 1.3.4.2 mjf
2392 1.3.4.2 mjf void
2393 1.3.4.2 mjf wapbl_replay_stop(struct wapbl_replay *wr)
2394 1.3.4.2 mjf {
2395 1.3.4.2 mjf
2396 1.3.4.3 mjf if (!wapbl_replay_isopen(wr))
2397 1.3.4.3 mjf return;
2398 1.3.4.2 mjf
2399 1.3.4.3 mjf WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
2400 1.3.4.2 mjf
2401 1.3.4.2 mjf wapbl_free(wr->wr_scratch);
2402 1.3.4.2 mjf wr->wr_scratch = 0;
2403 1.3.4.2 mjf
2404 1.3.4.2 mjf wr->wr_logvp = 0;
2405 1.3.4.2 mjf
2406 1.3.4.2 mjf wapbl_blkhash_clear(wr);
2407 1.3.4.2 mjf wapbl_blkhash_free(wr);
2408 1.3.4.2 mjf }
2409 1.3.4.2 mjf
2410 1.3.4.2 mjf void
2411 1.3.4.2 mjf wapbl_replay_free(struct wapbl_replay *wr)
2412 1.3.4.2 mjf {
2413 1.3.4.2 mjf
2414 1.3.4.2 mjf KDASSERT(!wapbl_replay_isopen(wr));
2415 1.3.4.2 mjf
2416 1.3.4.2 mjf if (wr->wr_inodes)
2417 1.3.4.2 mjf wapbl_free(wr->wr_inodes);
2418 1.3.4.2 mjf wapbl_free(wr);
2419 1.3.4.2 mjf }
2420 1.3.4.2 mjf
#ifdef _KERNEL
/*
 * Out-of-line version of wapbl_replay_isopen() for kernel callers
 * that need a real function symbol (presumably wapbl_replay_isopen
 * is a macro or inline — confirm against its definition).
 */
int
wapbl_replay_isopen1(struct wapbl_replay *wr)
{

	return wapbl_replay_isopen(wr);
}
#endif
2429 1.3.4.3 mjf
2430 1.3.4.3 mjf static void
2431 1.3.4.3 mjf wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
2432 1.3.4.3 mjf {
2433 1.3.4.3 mjf struct wapbl_wc_blocklist *wc =
2434 1.3.4.3 mjf (struct wapbl_wc_blocklist *)wr->wr_scratch;
2435 1.3.4.3 mjf int fsblklen = 1 << wr->wr_fs_dev_bshift;
2436 1.3.4.3 mjf int i, j, n;
2437 1.3.4.3 mjf
2438 1.3.4.3 mjf for (i = 0; i < wc->wc_blkcount; i++) {
2439 1.3.4.3 mjf /*
2440 1.3.4.3 mjf * Enter each physical block into the hashtable independently.
2441 1.3.4.3 mjf */
2442 1.3.4.3 mjf n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2443 1.3.4.3 mjf for (j = 0; j < n; j++) {
2444 1.3.4.3 mjf wapbl_blkhash_ins(wr, wc->wc_blocks[i].wc_daddr + j,
2445 1.3.4.3 mjf *offp);
2446 1.3.4.3 mjf wapbl_circ_advance(wr, fsblklen, offp);
2447 1.3.4.3 mjf }
2448 1.3.4.3 mjf }
2449 1.3.4.3 mjf }
2450 1.3.4.3 mjf
2451 1.3.4.3 mjf static void
2452 1.3.4.3 mjf wapbl_replay_process_revocations(struct wapbl_replay *wr)
2453 1.3.4.3 mjf {
2454 1.3.4.3 mjf struct wapbl_wc_blocklist *wc =
2455 1.3.4.3 mjf (struct wapbl_wc_blocklist *)wr->wr_scratch;
2456 1.3.4.3 mjf int i, j, n;
2457 1.3.4.3 mjf
2458 1.3.4.3 mjf for (i = 0; i < wc->wc_blkcount; i++) {
2459 1.3.4.3 mjf /*
2460 1.3.4.3 mjf * Remove any blocks found from the hashtable.
2461 1.3.4.3 mjf */
2462 1.3.4.3 mjf n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
2463 1.3.4.3 mjf for (j = 0; j < n; j++)
2464 1.3.4.3 mjf wapbl_blkhash_rem(wr, wc->wc_blocks[i].wc_daddr + j);
2465 1.3.4.3 mjf }
2466 1.3.4.3 mjf }
2467 1.3.4.3 mjf
/*
 * Handle a WAPBL_WC_INODES record (already read into wr_scratch):
 * remember the log span [oldoff, newoff) holding the current inode
 * set so it is not overwritten, and append the record's inodes to
 * wr_inodes.  A record with wc_clear set supersedes all earlier
 * inode records.
 */
static void
wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
{
	struct wapbl_wc_inodelist *wc =
	    (struct wapbl_wc_inodelist *)wr->wr_scratch;
	/*
	 * Keep track of where we found this so location won't be
	 * overwritten.
	 */
	if (wc->wc_clear) {
		/* Restart accumulation from this record onward. */
		wr->wr_inodestail = oldoff;
		wr->wr_inodescnt = 0;
		if (wr->wr_inodes != NULL) {
			wapbl_free(wr->wr_inodes);
			wr->wr_inodes = NULL;
		}
	}
	wr->wr_inodeshead = newoff;
	if (wc->wc_inocnt == 0)
		return;

	/*
	 * NOTE(review): overwriting wr_inodes with the realloc result
	 * assumes wapbl_realloc cannot fail (or panics on failure) —
	 * confirm; a failing realloc here would leak the old array.
	 */
	wr->wr_inodes = wapbl_realloc(wr->wr_inodes,
	    (wr->wr_inodescnt + wc->wc_inocnt) * sizeof(wc->wc_inodes[0]));
	memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
	    wc->wc_inocnt * sizeof(wc->wc_inodes[0]));
	wr->wr_inodescnt += wc->wc_inocnt;
}
2495 1.3.4.2 mjf
/*
 * Walk the circular log from 'tail' to 'head', dispatching each
 * record by type: block lists populate the block hashtable,
 * revocations remove entries from it, and inode lists accumulate in
 * wr_inodes.  Returns 0 on success; on failure (I/O error or a
 * malformed/unknown record) returns nonzero with the hashtable
 * cleared.
 */
static int
wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
{
	off_t off;
	int error;

	int logblklen = 1 << wr->wr_log_dev_bshift;

	/* Start from a clean slate in case of an earlier scan. */
	wapbl_blkhash_clear(wr);

	off = tail;
	while (off != head) {
		struct wapbl_wc_null *wcn;
		off_t saveoff = off;
		/* Read the record header (one log-device block). */
		error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
		if (error)
			goto errout;
		wcn = (struct wapbl_wc_null *)wr->wr_scratch;
		switch (wcn->wc_type) {
		case WAPBL_WC_BLOCKS:
			/* Advances 'off' past the blocks' data. */
			wapbl_replay_process_blocks(wr, &off);
			break;

		case WAPBL_WC_REVOCATIONS:
			wapbl_replay_process_revocations(wr);
			break;

		case WAPBL_WC_INODES:
			wapbl_replay_process_inodes(wr, saveoff, off);
			break;

		default:
			printf("Unrecognized wapbl type: 0x%08x\n",
			    wcn->wc_type);
			error = EFTYPE;
			goto errout;
		}
		/*
		 * Cross-check: advancing the record's start by its
		 * self-declared length must land exactly where the
		 * per-type handler left 'off'; a mismatch means the
		 * log is corrupt.
		 */
		wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
		if (off != saveoff) {
			printf("wapbl_replay: corrupted records\n");
			error = EFTYPE;
			goto errout;
		}
	}
	return 0;

 errout:
	wapbl_blkhash_clear(wr);
	return error;
}
2546 1.3.4.2 mjf
#if 0
/*
 * wapbl_replay_verify: compare every journalled block in the log
 * against its on-disk copy on fsdevvp, reporting mismatches.
 *
 * Currently compiled out.  NOTE(review): as written it references
 * 'wch' (the log header), which is not in scope in this function, so
 * it needs rework before it can be re-enabled.
 */
int
wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
{
	off_t off;
	int mismatchcnt = 0;
	int logblklen = 1 << wr->wr_log_dev_bshift;
	int fsblklen = 1 << wr->wr_fs_dev_bshift;
	void *scratch1 = wapbl_malloc(MAXBSIZE);
	void *scratch2 = wapbl_malloc(MAXBSIZE);
	int error = 0;

	KDASSERT(wapbl_replay_isopen(wr));

	off = wch->wc_tail;
	while (off != wch->wc_head) {
		struct wapbl_wc_null *wcn;
#ifdef DEBUG
		off_t saveoff = off;
#endif
		error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
		if (error)
			goto out;
		wcn = (struct wapbl_wc_null *)wr->wr_scratch;
		switch (wcn->wc_type) {
		case WAPBL_WC_BLOCKS:
			{
				struct wapbl_wc_blocklist *wc =
				    (struct wapbl_wc_blocklist *)wr->wr_scratch;
				int i;
				for (i = 0; i < wc->wc_blkcount; i++) {
					int foundcnt = 0;
					int dirtycnt = 0;
					int j, n;
					/*
					 * Check each physical block into the
					 * hashtable independently
					 */
					n = wc->wc_blocks[i].wc_dlen >>
					    wch->wc_fs_dev_bshift;
					for (j = 0; j < n; j++) {
						struct wapbl_blk *wb =
						    wapbl_blkhash_get(wr,
						    wc->wc_blocks[i].wc_daddr + j);
						if (wb && (wb->wb_off == off)) {
							foundcnt++;
							error =
							    wapbl_circ_read(wr,
							    scratch1, fsblklen,
							    &off);
							if (error)
								goto out;
							error =
							    wapbl_read(scratch2,
							    fsblklen, fsdevvp,
							    wb->wb_blk);
							if (error)
								goto out;
							if (memcmp(scratch1,
								   scratch2,
								   fsblklen)) {
								printf(
		"wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
								wb->wb_blk, (intmax_t)off);
								dirtycnt++;
								mismatchcnt++;
							}
						} else {
							wapbl_circ_advance(wr,
							    fsblklen, &off);
						}
					}
#if 0
					/*
					 * If all of the blocks in an entry
					 * are clean, then remove all of its
					 * blocks from the hashtable since they
					 * never will need replay.
					 */
					if ((foundcnt != 0) &&
					    (dirtycnt == 0)) {
						off = saveoff;
						wapbl_circ_advance(wr,
						    logblklen, &off);
						for (j = 0; j < n; j++) {
							struct wapbl_blk *wb =
							   wapbl_blkhash_get(wr,
							   wc->wc_blocks[i].wc_daddr + j);
							if (wb &&
							  (wb->wb_off == off)) {
								wapbl_blkhash_rem(wr, wb->wb_blk);
							}
							wapbl_circ_advance(wr,
							    fsblklen, &off);
						}
					}
#endif
				}
			}
			break;
		case WAPBL_WC_REVOCATIONS:
		case WAPBL_WC_INODES:
			break;
		default:
			KASSERT(0);
		}
#ifdef DEBUG
		wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
		KASSERT(off == saveoff);
#endif
	}
 out:
	wapbl_free(scratch1);
	wapbl_free(scratch2);
	if (!error && mismatchcnt)
		error = EFTYPE;
	return error;
}
#endif
2666 1.3.4.2 mjf
2667 1.3.4.2 mjf int
2668 1.3.4.2 mjf wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
2669 1.3.4.2 mjf {
2670 1.3.4.3 mjf struct wapbl_blk *wb;
2671 1.3.4.3 mjf size_t i;
2672 1.3.4.2 mjf off_t off;
2673 1.3.4.3 mjf void *scratch;
2674 1.3.4.2 mjf int error = 0;
2675 1.3.4.3 mjf int fsblklen = 1 << wr->wr_fs_dev_bshift;
2676 1.3.4.2 mjf
2677 1.3.4.2 mjf KDASSERT(wapbl_replay_isopen(wr));
2678 1.3.4.2 mjf
2679 1.3.4.3 mjf scratch = wapbl_malloc(MAXBSIZE);
2680 1.3.4.2 mjf
2681 1.3.4.3 mjf for (i = 0; i < wr->wr_blkhashmask; ++i) {
2682 1.3.4.3 mjf LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
2683 1.3.4.3 mjf off = wb->wb_off;
2684 1.3.4.3 mjf error = wapbl_circ_read(wr, scratch, fsblklen, &off);
2685 1.3.4.3 mjf if (error)
2686 1.3.4.3 mjf break;
2687 1.3.4.3 mjf error = wapbl_write(scratch, fsblklen, fsdevvp,
2688 1.3.4.3 mjf wb->wb_blk);
2689 1.3.4.3 mjf if (error)
2690 1.3.4.3 mjf break;
2691 1.3.4.2 mjf }
2692 1.3.4.2 mjf }
2693 1.3.4.3 mjf
2694 1.3.4.3 mjf wapbl_free(scratch);
2695 1.3.4.2 mjf return error;
2696 1.3.4.2 mjf }
2697 1.3.4.2 mjf
2698 1.3.4.2 mjf int
2699 1.3.4.3 mjf wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
2700 1.3.4.3 mjf {
2701 1.3.4.3 mjf int fsblklen = 1 << wr->wr_fs_dev_bshift;
2702 1.3.4.3 mjf
2703 1.3.4.3 mjf KDASSERT(wapbl_replay_isopen(wr));
2704 1.3.4.3 mjf KASSERT((len % fsblklen) == 0);
2705 1.3.4.3 mjf
2706 1.3.4.3 mjf while (len != 0) {
2707 1.3.4.3 mjf struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2708 1.3.4.3 mjf if (wb)
2709 1.3.4.3 mjf return 1;
2710 1.3.4.3 mjf len -= fsblklen;
2711 1.3.4.3 mjf }
2712 1.3.4.3 mjf return 0;
2713 1.3.4.3 mjf }
2714 1.3.4.3 mjf
2715 1.3.4.3 mjf int
2716 1.3.4.2 mjf wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
2717 1.3.4.2 mjf {
2718 1.3.4.3 mjf int fsblklen = 1 << wr->wr_fs_dev_bshift;
2719 1.3.4.2 mjf
2720 1.3.4.2 mjf KDASSERT(wapbl_replay_isopen(wr));
2721 1.3.4.2 mjf
2722 1.3.4.2 mjf KASSERT((len % fsblklen) == 0);
2723 1.3.4.2 mjf
2724 1.3.4.2 mjf while (len != 0) {
2725 1.3.4.2 mjf struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2726 1.3.4.2 mjf if (wb) {
2727 1.3.4.2 mjf off_t off = wb->wb_off;
2728 1.3.4.2 mjf int error;
2729 1.3.4.2 mjf error = wapbl_circ_read(wr, data, fsblklen, &off);
2730 1.3.4.2 mjf if (error)
2731 1.3.4.2 mjf return error;
2732 1.3.4.2 mjf }
2733 1.3.4.2 mjf data = (uint8_t *)data + fsblklen;
2734 1.3.4.2 mjf len -= fsblklen;
2735 1.3.4.2 mjf blk++;
2736 1.3.4.2 mjf }
2737 1.3.4.2 mjf return 0;
2738 1.3.4.2 mjf }
2739