vfs_wapbl.c revision 1.117 1 1.117 riastrad /* $NetBSD: vfs_wapbl.c,v 1.117 2024/12/07 15:10:42 riastradh Exp $ */
2 1.2 simonb
3 1.2 simonb /*-
4 1.23 ad * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
5 1.2 simonb * All rights reserved.
6 1.2 simonb *
7 1.2 simonb * This code is derived from software contributed to The NetBSD Foundation
8 1.2 simonb * by Wasabi Systems, Inc.
9 1.2 simonb *
10 1.2 simonb * Redistribution and use in source and binary forms, with or without
11 1.2 simonb * modification, are permitted provided that the following conditions
12 1.2 simonb * are met:
13 1.2 simonb * 1. Redistributions of source code must retain the above copyright
14 1.2 simonb * notice, this list of conditions and the following disclaimer.
15 1.2 simonb * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 simonb * notice, this list of conditions and the following disclaimer in the
17 1.2 simonb * documentation and/or other materials provided with the distribution.
18 1.2 simonb *
19 1.2 simonb * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 simonb * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 simonb * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 simonb * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 simonb * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 simonb * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 simonb * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 simonb * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 simonb * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 simonb * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 simonb * POSSIBILITY OF SUCH DAMAGE.
30 1.2 simonb */
31 1.2 simonb
32 1.2 simonb /*
33 1.2 simonb * This implements file system independent write ahead filesystem logging.
34 1.2 simonb */
35 1.4 joerg
36 1.4 joerg #define WAPBL_INTERNAL
37 1.4 joerg
38 1.2 simonb #include <sys/cdefs.h>
39 1.117 riastrad __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.117 2024/12/07 15:10:42 riastradh Exp $");
40 1.2 simonb
41 1.2 simonb #include <sys/param.h>
42 1.114 riastrad #include <sys/types.h>
43 1.114 riastrad
44 1.31 mlelstv #include <sys/bitops.h>
45 1.68 riastrad #include <sys/time.h>
46 1.68 riastrad #include <sys/wapbl.h>
47 1.68 riastrad #include <sys/wapbl_replay.h>
48 1.2 simonb
49 1.2 simonb #ifdef _KERNEL
50 1.68 riastrad
51 1.68 riastrad #include <sys/atomic.h>
52 1.68 riastrad #include <sys/conf.h>
53 1.90 riastrad #include <sys/evcnt.h>
54 1.68 riastrad #include <sys/file.h>
55 1.68 riastrad #include <sys/kauth.h>
56 1.68 riastrad #include <sys/kernel.h>
57 1.68 riastrad #include <sys/module.h>
58 1.68 riastrad #include <sys/mount.h>
59 1.68 riastrad #include <sys/mutex.h>
60 1.2 simonb #include <sys/namei.h>
61 1.2 simonb #include <sys/proc.h>
62 1.68 riastrad #include <sys/resourcevar.h>
63 1.116 riastrad #include <sys/sdt.h>
64 1.39 christos #include <sys/sysctl.h>
65 1.2 simonb #include <sys/uio.h>
66 1.2 simonb #include <sys/vnode.h>
67 1.2 simonb
68 1.2 simonb #include <miscfs/specfs/specdev.h>
69 1.2 simonb
70 1.114 riastrad #define wapbl_alloc(s) kmem_alloc((s), KM_SLEEP)
71 1.114 riastrad #define wapbl_free(a, s) kmem_free((a), (s))
72 1.114 riastrad #define wapbl_calloc(n, s) kmem_zalloc((n)*(s), KM_SLEEP)
73 1.2 simonb
74 1.39 christos static int wapbl_flush_disk_cache = 1;
75 1.39 christos static int wapbl_verbose_commit = 0;
76 1.115 riastrad static int wapbl_allow_dpofua = 0; /* switched off by default for now */
77 1.95 jdolecek static int wapbl_journal_iobufs = 4;
78 1.39 christos
79 1.57 joerg static inline size_t wapbl_space_free(size_t, off_t, off_t);
80 1.57 joerg
81 1.2 simonb #else /* !_KERNEL */
82 1.68 riastrad
83 1.2 simonb #include <assert.h>
84 1.2 simonb #include <errno.h>
85 1.68 riastrad #include <stdbool.h>
86 1.2 simonb #include <stdio.h>
87 1.2 simonb #include <stdlib.h>
88 1.2 simonb #include <string.h>
89 1.2 simonb
90 1.114 riastrad #define KDASSERT(x) assert(x)
91 1.114 riastrad #define KASSERT(x) assert(x)
92 1.114 riastrad #define wapbl_alloc(s) malloc(s)
93 1.114 riastrad #define wapbl_free(a, s) free(a)
94 1.114 riastrad #define wapbl_calloc(n, s) calloc((n), (s))
95 1.2 simonb
96 1.117 riastrad #define SET_ERROR(E) (E)
97 1.117 riastrad
98 1.2 simonb #endif /* !_KERNEL */
99 1.2 simonb
100 1.2 simonb /*
101 1.2 simonb * INTERNAL DATA STRUCTURES
102 1.2 simonb */
103 1.2 simonb
104 1.91 riastrad /*
105 1.2 simonb * This structure holds per-mount log information.
106 1.2 simonb *
107 1.2 simonb * Legend: a = atomic access only
108 1.2 simonb * r = read-only after init
109 1.2 simonb * l = rwlock held
110 1.2 simonb * m = mutex held
111 1.38 hannken * lm = rwlock held writing or mutex held
112 1.2 simonb * u = unlocked access ok
113 1.2 simonb * b = bufcache_lock held
114 1.2 simonb */
115 1.60 matt LIST_HEAD(wapbl_ino_head, wapbl_ino);
116 1.2 simonb struct wapbl {
117 1.2 simonb struct vnode *wl_logvp; /* r: log here */
118 1.2 simonb struct vnode *wl_devvp; /* r: log on this device */
119 1.2 simonb struct mount *wl_mount; /* r: mountpoint wl is associated with */
120 1.2 simonb daddr_t wl_logpbn; /* r: Physical block number of start of log */
121 1.2 simonb int wl_log_dev_bshift; /* r: logarithm of device block size of log
122 1.2 simonb device */
123 1.2 simonb int wl_fs_dev_bshift; /* r: logarithm of device block size of
124 1.2 simonb filesystem device */
125 1.2 simonb
126 1.3 yamt unsigned wl_lock_count; /* m: Count of transactions in progress */
127 1.2 simonb
128 1.115 riastrad size_t wl_circ_size; /* r: Number of bytes in buffer of log */
129 1.2 simonb size_t wl_circ_off; /* r: Number of bytes reserved at start */
130 1.2 simonb
131 1.2 simonb size_t wl_bufcount_max; /* r: Number of buffers reserved for log */
132 1.2 simonb size_t wl_bufbytes_max; /* r: Number of buf bytes reserved for log */
133 1.2 simonb
134 1.2 simonb off_t wl_head; /* l: Byte offset of log head */
135 1.2 simonb off_t wl_tail; /* l: Byte offset of log tail */
136 1.2 simonb /*
137 1.71 riastrad * WAPBL log layout, stored on wl_devvp at wl_logpbn:
138 1.71 riastrad *
139 1.71 riastrad * ___________________ wl_circ_size __________________
140 1.71 riastrad * / \
141 1.71 riastrad * +---------+---------+-------+--------------+--------+
142 1.71 riastrad * [ commit0 | commit1 | CCWCW | EEEEEEEEEEEE | CCCWCW ]
143 1.71 riastrad * +---------+---------+-------+--------------+--------+
144 1.71 riastrad * wl_circ_off --^ ^-- wl_head ^-- wl_tail
145 1.71 riastrad *
146 1.71 riastrad * commit0 and commit1 are commit headers. A commit header has
147 1.71 riastrad * a generation number, indicating which of the two headers is
148 1.71 riastrad * more recent, and an assignment of head and tail pointers.
149 1.71 riastrad * The rest is a circular queue of log records, starting at
150 1.71 riastrad * the byte offset wl_circ_off.
151 1.71 riastrad *
152 1.71 riastrad * E marks empty space for records.
153 1.71 riastrad * W marks records for block writes issued but waiting.
154 1.71 riastrad * C marks completed records.
155 1.71 riastrad *
156 1.71 riastrad * wapbl_flush writes new records to empty `E' spaces after
157 1.71 riastrad * wl_head from the current transaction in memory.
158 1.71 riastrad *
159 1.71 riastrad * wapbl_truncate advances wl_tail past any completed `C'
160 1.71 riastrad * records, freeing them up for use.
161 1.71 riastrad *
162 1.71 riastrad * head == tail == 0 means log is empty.
163 1.71 riastrad * head == tail != 0 means log is full.
164 1.71 riastrad *
165 1.71 riastrad * See assertions in wapbl_advance() for other boundary
166 1.71 riastrad * conditions.
167 1.71 riastrad *
168 1.71 riastrad * Only wapbl_flush moves the head, except when wapbl_truncate
169 1.71 riastrad * sets it to 0 to indicate that the log is empty.
170 1.71 riastrad *
171 1.71 riastrad * Only wapbl_truncate moves the tail, except when wapbl_flush
172 1.71 riastrad * sets it to wl_circ_off to indicate that the log is full.
173 1.2 simonb */
174 1.2 simonb
175 1.2 simonb struct wapbl_wc_header *wl_wc_header; /* l */
176 1.2 simonb void *wl_wc_scratch; /* l: scratch space (XXX: por que?!?) */
177 1.2 simonb
178 1.2 simonb kmutex_t wl_mtx; /* u: short-term lock */
179 1.2 simonb krwlock_t wl_rwlock; /* u: File system transaction lock */
180 1.2 simonb
181 1.2 simonb /*
182 1.2 simonb * Must be held while accessing
183 1.2 simonb * wl_count or wl_bufs or head or tail
184 1.2 simonb */
185 1.2 simonb
186 1.87 jdolecek #if _KERNEL
187 1.2 simonb /*
188 1.2 simonb * Callback called from within the flush routine to flush any extra
189 1.2 simonb * bits. Note that flush may be skipped without calling this if
190 1.2 simonb * there are no outstanding buffers in the transaction.
191 1.2 simonb */
192 1.2 simonb wapbl_flush_fn_t wl_flush; /* r */
193 1.2 simonb wapbl_flush_fn_t wl_flush_abort;/* r */
194 1.87 jdolecek
195 1.87 jdolecek /* Event counters */
196 1.87 jdolecek char wl_ev_group[EVCNT_STRING_MAX]; /* r */
197 1.87 jdolecek struct evcnt wl_ev_commit; /* l */
198 1.87 jdolecek struct evcnt wl_ev_journalwrite; /* l */
199 1.95 jdolecek struct evcnt wl_ev_jbufs_bio_nowait; /* l */
200 1.87 jdolecek struct evcnt wl_ev_metawrite; /* lm */
201 1.87 jdolecek struct evcnt wl_ev_cacheflush; /* l */
202 1.5 joerg #endif
203 1.2 simonb
204 1.2 simonb size_t wl_bufbytes; /* m: Byte count of pages in wl_bufs */
205 1.2 simonb size_t wl_bufcount; /* m: Count of buffers in wl_bufs */
206 1.2 simonb size_t wl_bcount; /* m: Total bcount of wl_bufs */
207 1.2 simonb
208 1.94 jdolecek TAILQ_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */
209 1.2 simonb
210 1.2 simonb kcondvar_t wl_reclaimable_cv; /* m (obviously) */
211 1.2 simonb size_t wl_reclaimable_bytes; /* m: Amount of space available for
212 1.2 simonb reclamation by truncate */
213 1.2 simonb int wl_error_count; /* m: # of wl_entries with errors */
214 1.2 simonb size_t wl_reserved_bytes; /* never truncate log smaller than this */
215 1.2 simonb
216 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
217 1.2 simonb size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
218 1.2 simonb #endif
219 1.2 simonb
220 1.79 jdolecek #if _KERNEL
221 1.79 jdolecek int wl_brperjblock; /* r Block records per journal block */
222 1.79 jdolecek #endif
223 1.79 jdolecek
224 1.86 jdolecek TAILQ_HEAD(, wapbl_dealloc) wl_dealloclist; /* lm: list head */
225 1.81 jdolecek int wl_dealloccnt; /* lm: total count */
226 1.81 jdolecek int wl_dealloclim; /* r: max count */
227 1.2 simonb
228 1.2 simonb /* hashtable of inode numbers for allocated but unlinked inodes */
229 1.2 simonb /* synch ??? */
230 1.60 matt struct wapbl_ino_head *wl_inohash;
231 1.2 simonb u_long wl_inohashmask;
232 1.2 simonb int wl_inohashcnt;
233 1.2 simonb
234 1.107 jdolecek SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* m: On disk transaction
235 1.2 simonb accounting */
236 1.54 hannken
237 1.95 jdolecek /* buffers for wapbl_buffered_write() */
238 1.95 jdolecek TAILQ_HEAD(, buf) wl_iobufs; /* l: Free or filling bufs */
239 1.95 jdolecek TAILQ_HEAD(, buf) wl_iobufs_busy; /* l: In-transit bufs */
240 1.93 jdolecek
241 1.115 riastrad int wl_dkcache; /* r: disk cache flags */
242 1.93 jdolecek #define WAPBL_USE_FUA(wl) \
243 1.96 jdolecek (wapbl_allow_dpofua && ISSET((wl)->wl_dkcache, DKCACHE_FUA))
244 1.93 jdolecek #define WAPBL_JFLAGS(wl) \
245 1.93 jdolecek (WAPBL_USE_FUA(wl) ? (wl)->wl_jwrite_flags : 0)
246 1.101 jdolecek #define WAPBL_JDATA_FLAGS(wl) \
247 1.101 jdolecek (WAPBL_JFLAGS(wl) & B_MEDIA_DPO) /* only DPO */
248 1.115 riastrad int wl_jwrite_flags; /* r: journal write flags */
249 1.2 simonb };
250 1.2 simonb
251 1.2 simonb #ifdef WAPBL_DEBUG_PRINT
252 1.2 simonb int wapbl_debug_print = WAPBL_DEBUG_PRINT;
253 1.2 simonb #endif
254 1.2 simonb
255 1.2 simonb /****************************************************************/
256 1.2 simonb #ifdef _KERNEL
257 1.2 simonb
258 1.2 simonb #ifdef WAPBL_DEBUG
259 1.2 simonb struct wapbl *wapbl_debug_wl;
260 1.2 simonb #endif
261 1.2 simonb
262 1.114 riastrad static int wapbl_write_commit(struct wapbl *, off_t, off_t);
263 1.114 riastrad static int wapbl_write_blocks(struct wapbl *, off_t *);
264 1.114 riastrad static int wapbl_write_revocations(struct wapbl *, off_t *);
265 1.114 riastrad static int wapbl_write_inodes(struct wapbl *, off_t *);
266 1.2 simonb #endif /* _KERNEL */
267 1.2 simonb
268 1.114 riastrad static int wapbl_replay_process(struct wapbl_replay *, off_t, off_t);
269 1.2 simonb
270 1.114 riastrad static inline size_t wapbl_space_used(size_t, off_t, off_t);
271 1.2 simonb
272 1.2 simonb #ifdef _KERNEL
273 1.2 simonb
274 1.51 para static struct pool wapbl_entry_pool;
275 1.81 jdolecek static struct pool wapbl_dealloc_pool;
276 1.51 para
277 1.2 simonb #define WAPBL_INODETRK_SIZE 83
278 1.2 simonb static int wapbl_ino_pool_refcount;
279 1.2 simonb static struct pool wapbl_ino_pool;
280 1.2 simonb struct wapbl_ino {
281 1.2 simonb LIST_ENTRY(wapbl_ino) wi_hash;
282 1.2 simonb ino_t wi_ino;
283 1.2 simonb mode_t wi_mode;
284 1.2 simonb };
285 1.2 simonb
286 1.2 simonb static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
287 1.2 simonb static void wapbl_inodetrk_free(struct wapbl *wl);
288 1.2 simonb static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);
289 1.2 simonb
290 1.2 simonb static size_t wapbl_transaction_len(struct wapbl *wl);
291 1.30 uebayasi static inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);
292 1.2 simonb
293 1.86 jdolecek static void wapbl_deallocation_free(struct wapbl *, struct wapbl_dealloc *,
294 1.114 riastrad bool);
295 1.86 jdolecek
296 1.87 jdolecek static void wapbl_evcnt_init(struct wapbl *);
297 1.87 jdolecek static void wapbl_evcnt_free(struct wapbl *);
298 1.87 jdolecek
299 1.93 jdolecek static void wapbl_dkcache_init(struct wapbl *);
300 1.93 jdolecek
301 1.13 joerg #if 0
302 1.4 joerg int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
303 1.4 joerg #endif
304 1.4 joerg
305 1.4 joerg static int wapbl_replay_isopen1(struct wapbl_replay *);
306 1.4 joerg
307 1.103 jdolecek const struct wapbl_ops wapbl_ops = {
308 1.2 simonb .wo_wapbl_discard = wapbl_discard,
309 1.2 simonb .wo_wapbl_replay_isopen = wapbl_replay_isopen1,
310 1.6 joerg .wo_wapbl_replay_can_read = wapbl_replay_can_read,
311 1.2 simonb .wo_wapbl_replay_read = wapbl_replay_read,
312 1.2 simonb .wo_wapbl_add_buf = wapbl_add_buf,
313 1.2 simonb .wo_wapbl_remove_buf = wapbl_remove_buf,
314 1.2 simonb .wo_wapbl_resize_buf = wapbl_resize_buf,
315 1.2 simonb .wo_wapbl_begin = wapbl_begin,
316 1.2 simonb .wo_wapbl_end = wapbl_end,
317 1.2 simonb .wo_wapbl_junlock_assert= wapbl_junlock_assert,
318 1.102 jdolecek .wo_wapbl_jlock_assert = wapbl_jlock_assert,
319 1.2 simonb
320 1.2 simonb /* XXX: the following is only used to say "this is a wapbl buf" */
321 1.2 simonb .wo_wapbl_biodone = wapbl_biodone,
322 1.2 simonb };
323 1.2 simonb
324 1.106 pgoyette SYSCTL_SETUP(wapbl_sysctl_init, "wapbl sysctl")
325 1.39 christos {
326 1.39 christos int rv;
327 1.39 christos const struct sysctlnode *rnode, *cnode;
328 1.39 christos
329 1.106 pgoyette rv = sysctl_createv(clog, 0, NULL, &rnode,
330 1.114 riastrad CTLFLAG_PERMANENT,
331 1.114 riastrad CTLTYPE_NODE, "wapbl",
332 1.114 riastrad SYSCTL_DESCR("WAPBL journaling options"),
333 1.114 riastrad NULL, 0, NULL, 0,
334 1.114 riastrad CTL_VFS, CTL_CREATE, CTL_EOL);
335 1.39 christos if (rv)
336 1.106 pgoyette return;
337 1.39 christos
338 1.106 pgoyette rv = sysctl_createv(clog, 0, &rnode, &cnode,
339 1.114 riastrad CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
340 1.114 riastrad CTLTYPE_INT, "flush_disk_cache",
341 1.114 riastrad SYSCTL_DESCR("flush disk cache"),
342 1.114 riastrad NULL, 0, &wapbl_flush_disk_cache, 0,
343 1.114 riastrad CTL_CREATE, CTL_EOL);
344 1.39 christos if (rv)
345 1.106 pgoyette return;
346 1.39 christos
347 1.106 pgoyette rv = sysctl_createv(clog, 0, &rnode, &cnode,
348 1.114 riastrad CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
349 1.114 riastrad CTLTYPE_INT, "verbose_commit",
350 1.114 riastrad SYSCTL_DESCR("show time and size of wapbl log commits"),
351 1.114 riastrad NULL, 0, &wapbl_verbose_commit, 0,
352 1.114 riastrad CTL_CREATE, CTL_EOL);
353 1.93 jdolecek if (rv)
354 1.106 pgoyette return;
355 1.93 jdolecek
356 1.106 pgoyette rv = sysctl_createv(clog, 0, &rnode, &cnode,
357 1.114 riastrad CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
358 1.114 riastrad CTLTYPE_INT, "allow_dpofua",
359 1.114 riastrad SYSCTL_DESCR("allow use of FUA/DPO instead of cache flush"
360 1.114 riastrad " if available"),
361 1.114 riastrad NULL, 0, &wapbl_allow_dpofua, 0,
362 1.114 riastrad CTL_CREATE, CTL_EOL);
363 1.93 jdolecek if (rv)
364 1.106 pgoyette return;
365 1.93 jdolecek
366 1.106 pgoyette rv = sysctl_createv(clog, 0, &rnode, &cnode,
367 1.114 riastrad CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
368 1.114 riastrad CTLTYPE_INT, "journal_iobufs",
369 1.114 riastrad SYSCTL_DESCR("count of bufs used for journal I/O"
370 1.114 riastrad " (max async count)"),
371 1.114 riastrad NULL, 0, &wapbl_journal_iobufs, 0,
372 1.114 riastrad CTL_CREATE, CTL_EOL);
373 1.95 jdolecek if (rv)
374 1.106 pgoyette return;
375 1.95 jdolecek
376 1.106 pgoyette return;
377 1.39 christos }
378 1.39 christos
379 1.39 christos static void
380 1.39 christos wapbl_init(void)
381 1.39 christos {
382 1.51 para
383 1.51 para pool_init(&wapbl_entry_pool, sizeof(struct wapbl_entry), 0, 0, 0,
384 1.51 para "wapblentrypl", &pool_allocator_kmem, IPL_VM);
385 1.81 jdolecek pool_init(&wapbl_dealloc_pool, sizeof(struct wapbl_dealloc), 0, 0, 0,
386 1.81 jdolecek "wapbldealloc", &pool_allocator_nointr, IPL_NONE);
387 1.39 christos }
388 1.39 christos
389 1.39 christos static int
390 1.74 riastrad wapbl_fini(void)
391 1.39 christos {
392 1.51 para
393 1.81 jdolecek pool_destroy(&wapbl_dealloc_pool);
394 1.51 para pool_destroy(&wapbl_entry_pool);
395 1.51 para
396 1.39 christos return 0;
397 1.39 christos }
398 1.39 christos
399 1.87 jdolecek static void
400 1.87 jdolecek wapbl_evcnt_init(struct wapbl *wl)
401 1.87 jdolecek {
402 1.114 riastrad
403 1.87 jdolecek snprintf(wl->wl_ev_group, sizeof(wl->wl_ev_group),
404 1.87 jdolecek "wapbl fsid 0x%x/0x%x",
405 1.87 jdolecek wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[0],
406 1.114 riastrad wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[1]);
407 1.87 jdolecek
408 1.87 jdolecek evcnt_attach_dynamic(&wl->wl_ev_commit, EVCNT_TYPE_MISC,
409 1.87 jdolecek NULL, wl->wl_ev_group, "commit");
410 1.87 jdolecek evcnt_attach_dynamic(&wl->wl_ev_journalwrite, EVCNT_TYPE_MISC,
411 1.98 jdolecek NULL, wl->wl_ev_group, "journal write total");
412 1.95 jdolecek evcnt_attach_dynamic(&wl->wl_ev_jbufs_bio_nowait, EVCNT_TYPE_MISC,
413 1.98 jdolecek NULL, wl->wl_ev_group, "journal write finished async");
414 1.87 jdolecek evcnt_attach_dynamic(&wl->wl_ev_metawrite, EVCNT_TYPE_MISC,
415 1.98 jdolecek NULL, wl->wl_ev_group, "metadata async write");
416 1.87 jdolecek evcnt_attach_dynamic(&wl->wl_ev_cacheflush, EVCNT_TYPE_MISC,
417 1.87 jdolecek NULL, wl->wl_ev_group, "cache flush");
418 1.87 jdolecek }
419 1.87 jdolecek
420 1.87 jdolecek static void
421 1.87 jdolecek wapbl_evcnt_free(struct wapbl *wl)
422 1.87 jdolecek {
423 1.114 riastrad
424 1.87 jdolecek evcnt_detach(&wl->wl_ev_commit);
425 1.87 jdolecek evcnt_detach(&wl->wl_ev_journalwrite);
426 1.95 jdolecek evcnt_detach(&wl->wl_ev_jbufs_bio_nowait);
427 1.87 jdolecek evcnt_detach(&wl->wl_ev_metawrite);
428 1.87 jdolecek evcnt_detach(&wl->wl_ev_cacheflush);
429 1.87 jdolecek }
430 1.87 jdolecek
431 1.93 jdolecek static void
432 1.93 jdolecek wapbl_dkcache_init(struct wapbl *wl)
433 1.93 jdolecek {
434 1.93 jdolecek int error;
435 1.93 jdolecek
436 1.93 jdolecek /* Get disk cache flags */
437 1.93 jdolecek error = VOP_IOCTL(wl->wl_devvp, DIOCGCACHE, &wl->wl_dkcache,
438 1.93 jdolecek FWRITE, FSCRED);
439 1.93 jdolecek if (error) {
440 1.93 jdolecek /* behave as if there was a write cache */
441 1.93 jdolecek wl->wl_dkcache = DKCACHE_WRITE;
442 1.93 jdolecek }
443 1.93 jdolecek
444 1.93 jdolecek /* Use FUA instead of cache flush if available */
445 1.101 jdolecek if (ISSET(wl->wl_dkcache, DKCACHE_FUA))
446 1.93 jdolecek wl->wl_jwrite_flags |= B_MEDIA_FUA;
447 1.93 jdolecek
448 1.93 jdolecek /* Use DPO for journal writes if available */
449 1.93 jdolecek if (ISSET(wl->wl_dkcache, DKCACHE_DPO))
450 1.93 jdolecek wl->wl_jwrite_flags |= B_MEDIA_DPO;
451 1.93 jdolecek }
452 1.93 jdolecek
453 1.39 christos static int
454 1.15 joerg wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
455 1.15 joerg {
456 1.15 joerg int error, i;
457 1.15 joerg
458 1.15 joerg WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
459 1.15 joerg ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));
460 1.15 joerg
461 1.15 joerg /*
462 1.15 joerg * Its only valid to reuse the replay log if its
463 1.15 joerg * the same as the new log we just opened.
464 1.15 joerg */
465 1.15 joerg KDASSERT(!wapbl_replay_isopen(wr));
466 1.47 christos KASSERT(wl->wl_devvp->v_type == VBLK);
467 1.47 christos KASSERT(wr->wr_devvp->v_type == VBLK);
468 1.15 joerg KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
469 1.15 joerg KASSERT(wl->wl_logpbn == wr->wr_logpbn);
470 1.15 joerg KASSERT(wl->wl_circ_size == wr->wr_circ_size);
471 1.15 joerg KASSERT(wl->wl_circ_off == wr->wr_circ_off);
472 1.15 joerg KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
473 1.15 joerg KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);
474 1.15 joerg
475 1.15 joerg wl->wl_wc_header->wc_generation = wr->wr_generation + 1;
476 1.15 joerg
477 1.15 joerg for (i = 0; i < wr->wr_inodescnt; i++)
478 1.15 joerg wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
479 1.15 joerg wr->wr_inodes[i].wr_imode);
480 1.15 joerg
481 1.15 joerg /* Make sure new transaction won't overwrite old inodes list */
482 1.91 riastrad KDASSERT(wapbl_transaction_len(wl) <=
483 1.15 joerg wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
484 1.114 riastrad wr->wr_inodestail));
485 1.15 joerg
486 1.15 joerg wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
487 1.15 joerg wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
488 1.15 joerg wapbl_transaction_len(wl);
489 1.15 joerg
490 1.15 joerg error = wapbl_write_inodes(wl, &wl->wl_head);
491 1.15 joerg if (error)
492 1.15 joerg return error;
493 1.15 joerg
494 1.15 joerg KASSERT(wl->wl_head != wl->wl_tail);
495 1.15 joerg KASSERT(wl->wl_head != 0);
496 1.15 joerg
497 1.15 joerg return 0;
498 1.15 joerg }
499 1.15 joerg
500 1.2 simonb int
501 1.2 simonb wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
502 1.114 riastrad daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
503 1.114 riastrad wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
504 1.2 simonb {
505 1.2 simonb struct wapbl *wl;
506 1.2 simonb struct vnode *devvp;
507 1.2 simonb daddr_t logpbn;
508 1.2 simonb int error;
509 1.31 mlelstv int log_dev_bshift = ilog2(blksize);
510 1.32 mlelstv int fs_dev_bshift = log_dev_bshift;
511 1.2 simonb int run;
512 1.2 simonb
513 1.114 riastrad WAPBL_PRINTF(WAPBL_PRINT_OPEN,
514 1.114 riastrad ("wapbl_start: vp=%p off=%"PRId64" count=%zu blksize=%zu\n",
515 1.114 riastrad vp, off, count, blksize));
516 1.2 simonb
517 1.2 simonb if (log_dev_bshift > fs_dev_bshift) {
518 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_OPEN,
519 1.114 riastrad ("wapbl: log device's block size cannot be larger "
520 1.114 riastrad "than filesystem's\n"));
521 1.2 simonb /*
522 1.2 simonb * Not currently implemented, although it could be if
523 1.2 simonb * needed someday.
524 1.2 simonb */
525 1.116 riastrad return SET_ERROR(ENOSYS);
526 1.2 simonb }
527 1.2 simonb
528 1.2 simonb if (off < 0)
529 1.116 riastrad return SET_ERROR(EINVAL);
530 1.2 simonb
531 1.2 simonb if (blksize < DEV_BSIZE)
532 1.116 riastrad return SET_ERROR(EINVAL);
533 1.2 simonb if (blksize % DEV_BSIZE)
534 1.116 riastrad return SET_ERROR(EINVAL);
535 1.2 simonb
536 1.2 simonb /* XXXTODO: verify that the full load is writable */
537 1.2 simonb
538 1.2 simonb /*
539 1.2 simonb * XXX check for minimum log size
540 1.2 simonb * minimum is governed by minimum amount of space
541 1.2 simonb * to complete a transaction. (probably truncate)
542 1.2 simonb */
543 1.2 simonb /* XXX for now pick something minimal */
544 1.2 simonb if ((count * blksize) < MAXPHYS) {
545 1.116 riastrad return SET_ERROR(ENOSPC);
546 1.2 simonb }
547 1.2 simonb
548 1.2 simonb if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
549 1.2 simonb return error;
550 1.2 simonb }
551 1.2 simonb
552 1.2 simonb wl = wapbl_calloc(1, sizeof(*wl));
553 1.2 simonb rw_init(&wl->wl_rwlock);
554 1.2 simonb mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
555 1.2 simonb cv_init(&wl->wl_reclaimable_cv, "wapblrec");
556 1.94 jdolecek TAILQ_INIT(&wl->wl_bufs);
557 1.2 simonb SIMPLEQ_INIT(&wl->wl_entries);
558 1.2 simonb
559 1.2 simonb wl->wl_logvp = vp;
560 1.2 simonb wl->wl_devvp = devvp;
561 1.2 simonb wl->wl_mount = mp;
562 1.2 simonb wl->wl_logpbn = logpbn;
563 1.2 simonb wl->wl_log_dev_bshift = log_dev_bshift;
564 1.2 simonb wl->wl_fs_dev_bshift = fs_dev_bshift;
565 1.2 simonb
566 1.2 simonb wl->wl_flush = flushfn;
567 1.2 simonb wl->wl_flush_abort = flushabortfn;
568 1.2 simonb
569 1.2 simonb /* Reserve two log device blocks for the commit headers */
570 1.2 simonb wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
571 1.34 mlelstv wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
572 1.2 simonb /* truncate the log usage to a multiple of log_dev_bshift */
573 1.2 simonb wl->wl_circ_size >>= wl->wl_log_dev_bshift;
574 1.2 simonb wl->wl_circ_size <<= wl->wl_log_dev_bshift;
575 1.2 simonb
576 1.2 simonb /*
577 1.2 simonb * wl_bufbytes_max limits the size of the in memory transaction space.
578 1.2 simonb * - Since buffers are allocated and accounted for in units of
579 1.2 simonb * PAGE_SIZE it is required to be a multiple of PAGE_SIZE
580 1.2 simonb * (i.e. 1<<PAGE_SHIFT)
581 1.2 simonb * - Since the log device has to be written in units of
582 1.111 andvar * 1<<wl_log_dev_bshift it is required to be a multiple of
583 1.2 simonb * 1<<wl_log_dev_bshift.
584 1.2 simonb * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
585 1.2 simonb * it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
586 1.2 simonb * Therefore it must be multiple of the least common multiple of those
587 1.2 simonb * three quantities. Fortunately, all of those quantities are
588 1.2 simonb * guaranteed to be a power of two, and the least common multiple of
589 1.2 simonb * a set of numbers which are all powers of two is simply the maximum
590 1.2 simonb * of those numbers. Finally, the maximum logarithm of a power of two
591 1.2 simonb * is the same as the log of the maximum power of two. So we can do
592 1.2 simonb * the following operations to size wl_bufbytes_max:
593 1.2 simonb */
594 1.2 simonb
595 1.2 simonb /* XXX fix actual number of pages reserved per filesystem. */
596 1.2 simonb wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);
597 1.2 simonb
598 1.2 simonb /* Round wl_bufbytes_max to the largest power of two constraint */
599 1.2 simonb wl->wl_bufbytes_max >>= PAGE_SHIFT;
600 1.2 simonb wl->wl_bufbytes_max <<= PAGE_SHIFT;
601 1.2 simonb wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
602 1.2 simonb wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
603 1.2 simonb wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
604 1.2 simonb wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
605 1.2 simonb
606 1.2 simonb /* XXX maybe use filesystem fragment size instead of 1024 */
607 1.2 simonb /* XXX fix actual number of buffers reserved per filesystem. */
608 1.97 chs wl->wl_bufcount_max = (buf_nbuf() / 2) * 1024;
609 1.2 simonb
610 1.79 jdolecek wl->wl_brperjblock = ((1<<wl->wl_log_dev_bshift)
611 1.79 jdolecek - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
612 1.79 jdolecek sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
613 1.79 jdolecek KASSERT(wl->wl_brperjblock > 0);
614 1.79 jdolecek
615 1.2 simonb /* XXX tie this into resource estimation */
616 1.41 hannken wl->wl_dealloclim = wl->wl_bufbytes_max / mp->mnt_stat.f_bsize / 2;
617 1.86 jdolecek TAILQ_INIT(&wl->wl_dealloclist);
618 1.91 riastrad
619 1.2 simonb wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);
620 1.2 simonb
621 1.87 jdolecek wapbl_evcnt_init(wl);
622 1.87 jdolecek
623 1.93 jdolecek wapbl_dkcache_init(wl);
624 1.93 jdolecek
625 1.2 simonb /* Initialize the commit header */
626 1.2 simonb {
627 1.2 simonb struct wapbl_wc_header *wc;
628 1.14 joerg size_t len = 1 << wl->wl_log_dev_bshift;
629 1.2 simonb wc = wapbl_calloc(1, len);
630 1.2 simonb wc->wc_type = WAPBL_WC_HEADER;
631 1.2 simonb wc->wc_len = len;
632 1.2 simonb wc->wc_circ_off = wl->wl_circ_off;
633 1.2 simonb wc->wc_circ_size = wl->wl_circ_size;
634 1.2 simonb /* XXX wc->wc_fsid */
635 1.2 simonb wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
636 1.2 simonb wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
637 1.2 simonb wl->wl_wc_header = wc;
638 1.51 para wl->wl_wc_scratch = wapbl_alloc(len);
639 1.2 simonb }
640 1.2 simonb
641 1.95 jdolecek TAILQ_INIT(&wl->wl_iobufs);
642 1.95 jdolecek TAILQ_INIT(&wl->wl_iobufs_busy);
643 1.95 jdolecek for (int i = 0; i < wapbl_journal_iobufs; i++) {
644 1.95 jdolecek struct buf *bp;
645 1.95 jdolecek
646 1.95 jdolecek if ((bp = geteblk(MAXPHYS)) == NULL)
647 1.95 jdolecek goto errout;
648 1.95 jdolecek
649 1.95 jdolecek mutex_enter(&bufcache_lock);
650 1.95 jdolecek mutex_enter(devvp->v_interlock);
651 1.95 jdolecek bgetvp(devvp, bp);
652 1.95 jdolecek mutex_exit(devvp->v_interlock);
653 1.95 jdolecek mutex_exit(&bufcache_lock);
654 1.95 jdolecek
655 1.95 jdolecek bp->b_dev = devvp->v_rdev;
656 1.95 jdolecek
657 1.95 jdolecek TAILQ_INSERT_TAIL(&wl->wl_iobufs, bp, b_wapbllist);
658 1.95 jdolecek }
659 1.95 jdolecek
660 1.2 simonb /*
661 1.2 simonb * if there was an existing set of unlinked but
662 1.2 simonb * allocated inodes, preserve it in the new
663 1.2 simonb * log.
664 1.2 simonb */
665 1.2 simonb if (wr && wr->wr_inodescnt) {
666 1.15 joerg error = wapbl_start_flush_inodes(wl, wr);
667 1.2 simonb if (error)
668 1.2 simonb goto errout;
669 1.2 simonb }
670 1.2 simonb
671 1.2 simonb error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
672 1.2 simonb if (error) {
673 1.2 simonb goto errout;
674 1.2 simonb }
675 1.2 simonb
676 1.2 simonb *wlp = wl;
677 1.2 simonb #if defined(WAPBL_DEBUG)
678 1.2 simonb wapbl_debug_wl = wl;
679 1.2 simonb #endif
680 1.2 simonb
681 1.2 simonb return 0;
682 1.114 riastrad errout:
683 1.2 simonb wapbl_discard(wl);
684 1.18 yamt wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
685 1.18 yamt wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
686 1.95 jdolecek while (!TAILQ_EMPTY(&wl->wl_iobufs)) {
687 1.95 jdolecek struct buf *bp;
688 1.95 jdolecek
689 1.95 jdolecek bp = TAILQ_FIRST(&wl->wl_iobufs);
690 1.95 jdolecek TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
691 1.95 jdolecek brelse(bp, BC_INVAL);
692 1.95 jdolecek }
693 1.2 simonb wapbl_inodetrk_free(wl);
694 1.18 yamt wapbl_free(wl, sizeof(*wl));
695 1.2 simonb
696 1.2 simonb return error;
697 1.2 simonb }
698 1.2 simonb
699 1.2 simonb /*
700 1.2 simonb * Like wapbl_flush, only discards the transaction
701 1.2 simonb * completely
702 1.2 simonb */
703 1.2 simonb
704 1.2 simonb void
705 1.2 simonb wapbl_discard(struct wapbl *wl)
706 1.2 simonb {
707 1.2 simonb struct wapbl_entry *we;
708 1.81 jdolecek struct wapbl_dealloc *wd;
709 1.2 simonb struct buf *bp;
710 1.2 simonb int i;
711 1.2 simonb
712 1.2 simonb /*
713 1.2 simonb * XXX we may consider using upgrade here
714 1.2 simonb * if we want to call flush from inside a transaction
715 1.2 simonb */
716 1.2 simonb rw_enter(&wl->wl_rwlock, RW_WRITER);
717 1.86 jdolecek wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));
718 1.2 simonb
719 1.2 simonb #ifdef WAPBL_DEBUG_PRINT
720 1.2 simonb {
721 1.2 simonb pid_t pid = -1;
722 1.2 simonb lwpid_t lid = -1;
723 1.2 simonb if (curproc)
724 1.2 simonb pid = curproc->p_pid;
725 1.2 simonb if (curlwp)
726 1.2 simonb lid = curlwp->l_lid;
727 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
728 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
729 1.2 simonb ("wapbl_discard: thread %d.%d discarding "
730 1.114 riastrad "transaction\n"
731 1.114 riastrad "\tbufcount=%zu bufbytes=%zu bcount=%zu "
732 1.114 riastrad "deallocs=%d inodes=%d\n"
733 1.114 riastrad "\terrcnt = %u, reclaimable=%zu reserved=%zu "
734 1.114 riastrad "unsynced=%zu\n",
735 1.114 riastrad pid, lid,
736 1.114 riastrad wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
737 1.114 riastrad wl->wl_dealloccnt, wl->wl_inohashcnt,
738 1.114 riastrad wl->wl_error_count, wl->wl_reclaimable_bytes,
739 1.114 riastrad wl->wl_reserved_bytes,
740 1.114 riastrad wl->wl_unsynced_bufbytes));
741 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
742 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
743 1.2 simonb ("\tentry: bufcount = %zu, reclaimable = %zu, "
744 1.114 riastrad "error = %d, unsynced = %zu\n",
745 1.114 riastrad we->we_bufcount, we->we_reclaimable_bytes,
746 1.114 riastrad we->we_error, we->we_unsynced_bufbytes));
747 1.2 simonb }
748 1.2 simonb #else /* !WAPBL_DEBUG_BUFBYTES */
749 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
750 1.2 simonb ("wapbl_discard: thread %d.%d discarding transaction\n"
751 1.114 riastrad "\tbufcount=%zu bufbytes=%zu bcount=%zu "
752 1.114 riastrad "deallocs=%d inodes=%d\n"
753 1.114 riastrad "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
754 1.114 riastrad pid, lid,
755 1.114 riastrad wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
756 1.114 riastrad wl->wl_dealloccnt, wl->wl_inohashcnt,
757 1.114 riastrad wl->wl_error_count, wl->wl_reclaimable_bytes,
758 1.114 riastrad wl->wl_reserved_bytes));
759 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
760 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
761 1.2 simonb ("\tentry: bufcount = %zu, reclaimable = %zu, "
762 1.114 riastrad "error = %d\n",
763 1.114 riastrad we->we_bufcount, we->we_reclaimable_bytes,
764 1.114 riastrad we->we_error));
765 1.2 simonb }
766 1.2 simonb #endif /* !WAPBL_DEBUG_BUFBYTES */
767 1.2 simonb }
768 1.2 simonb #endif /* WAPBL_DEBUG_PRINT */
769 1.2 simonb
770 1.2 simonb for (i = 0; i <= wl->wl_inohashmask; i++) {
771 1.2 simonb struct wapbl_ino_head *wih;
772 1.2 simonb struct wapbl_ino *wi;
773 1.2 simonb
774 1.2 simonb wih = &wl->wl_inohash[i];
775 1.2 simonb while ((wi = LIST_FIRST(wih)) != NULL) {
776 1.2 simonb LIST_REMOVE(wi, wi_hash);
777 1.2 simonb pool_put(&wapbl_ino_pool, wi);
778 1.2 simonb KASSERT(wl->wl_inohashcnt > 0);
779 1.2 simonb wl->wl_inohashcnt--;
780 1.2 simonb }
781 1.2 simonb }
782 1.2 simonb
783 1.2 simonb /*
784 1.2 simonb * clean buffer list
785 1.2 simonb */
786 1.2 simonb mutex_enter(&bufcache_lock);
787 1.2 simonb mutex_enter(&wl->wl_mtx);
788 1.94 jdolecek while ((bp = TAILQ_FIRST(&wl->wl_bufs)) != NULL) {
789 1.2 simonb if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
790 1.108 jdolecek KASSERT(bp->b_flags & B_LOCKED);
791 1.108 jdolecek KASSERT(bp->b_oflags & BO_DELWRI);
792 1.2 simonb /*
793 1.108 jdolecek * Buffer is already on BQ_LOCKED queue.
794 1.2 simonb * The buffer will be unlocked and
795 1.108 jdolecek * removed from the transaction in brelsel()
796 1.2 simonb */
797 1.2 simonb mutex_exit(&wl->wl_mtx);
798 1.108 jdolecek bremfree(bp);
799 1.108 jdolecek brelsel(bp, BC_INVAL);
800 1.2 simonb mutex_enter(&wl->wl_mtx);
801 1.2 simonb }
802 1.2 simonb }
803 1.2 simonb
804 1.2 simonb /*
805 1.2 simonb * Remove references to this wl from wl_entries, free any which
806 1.107 jdolecek * no longer have buffers, others will be freed in wapbl_biodone()
807 1.2 simonb * when they no longer have any buffers.
808 1.2 simonb */
809 1.2 simonb while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
810 1.2 simonb SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
811 1.2 simonb /* XXX should we be accumulating wl_error_count
812 1.2 simonb * and increasing reclaimable bytes ? */
813 1.2 simonb we->we_wapbl = NULL;
814 1.2 simonb if (we->we_bufcount == 0) {
815 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
816 1.2 simonb KASSERT(we->we_unsynced_bufbytes == 0);
817 1.2 simonb #endif
818 1.51 para pool_put(&wapbl_entry_pool, we);
819 1.2 simonb }
820 1.2 simonb }
821 1.2 simonb
822 1.107 jdolecek mutex_exit(&wl->wl_mtx);
823 1.107 jdolecek mutex_exit(&bufcache_lock);
824 1.107 jdolecek
825 1.2 simonb /* Discard list of deallocs */
826 1.86 jdolecek while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL)
827 1.86 jdolecek wapbl_deallocation_free(wl, wd, true);
828 1.81 jdolecek
829 1.2 simonb /* XXX should we clear wl_reserved_bytes? */
830 1.2 simonb
831 1.2 simonb KASSERT(wl->wl_bufbytes == 0);
832 1.2 simonb KASSERT(wl->wl_bcount == 0);
833 1.2 simonb KASSERT(wl->wl_bufcount == 0);
834 1.94 jdolecek KASSERT(TAILQ_EMPTY(&wl->wl_bufs));
835 1.2 simonb KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
836 1.2 simonb KASSERT(wl->wl_inohashcnt == 0);
837 1.86 jdolecek KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
838 1.81 jdolecek KASSERT(wl->wl_dealloccnt == 0);
839 1.2 simonb
840 1.2 simonb rw_exit(&wl->wl_rwlock);
841 1.2 simonb }
842 1.2 simonb
843 1.2 simonb int
844 1.2 simonb wapbl_stop(struct wapbl *wl, int force)
845 1.2 simonb {
846 1.2 simonb int error;
847 1.2 simonb
848 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
849 1.2 simonb error = wapbl_flush(wl, 1);
850 1.2 simonb if (error) {
851 1.2 simonb if (force)
852 1.2 simonb wapbl_discard(wl);
853 1.2 simonb else
854 1.2 simonb return error;
855 1.2 simonb }
856 1.2 simonb
857 1.2 simonb /* Unlinked inodes persist after a flush */
858 1.2 simonb if (wl->wl_inohashcnt) {
859 1.2 simonb if (force) {
860 1.2 simonb wapbl_discard(wl);
861 1.2 simonb } else {
862 1.116 riastrad return SET_ERROR(EBUSY);
863 1.2 simonb }
864 1.2 simonb }
865 1.2 simonb
866 1.2 simonb KASSERT(wl->wl_bufbytes == 0);
867 1.2 simonb KASSERT(wl->wl_bcount == 0);
868 1.2 simonb KASSERT(wl->wl_bufcount == 0);
869 1.94 jdolecek KASSERT(TAILQ_EMPTY(&wl->wl_bufs));
870 1.2 simonb KASSERT(wl->wl_dealloccnt == 0);
871 1.2 simonb KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
872 1.2 simonb KASSERT(wl->wl_inohashcnt == 0);
873 1.86 jdolecek KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
874 1.81 jdolecek KASSERT(wl->wl_dealloccnt == 0);
875 1.95 jdolecek KASSERT(TAILQ_EMPTY(&wl->wl_iobufs_busy));
876 1.2 simonb
877 1.18 yamt wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
878 1.18 yamt wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
879 1.95 jdolecek while (!TAILQ_EMPTY(&wl->wl_iobufs)) {
880 1.95 jdolecek struct buf *bp;
881 1.95 jdolecek
882 1.95 jdolecek bp = TAILQ_FIRST(&wl->wl_iobufs);
883 1.95 jdolecek TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
884 1.95 jdolecek brelse(bp, BC_INVAL);
885 1.95 jdolecek }
886 1.2 simonb wapbl_inodetrk_free(wl);
887 1.2 simonb
888 1.87 jdolecek wapbl_evcnt_free(wl);
889 1.87 jdolecek
890 1.2 simonb cv_destroy(&wl->wl_reclaimable_cv);
891 1.2 simonb mutex_destroy(&wl->wl_mtx);
892 1.2 simonb rw_destroy(&wl->wl_rwlock);
893 1.18 yamt wapbl_free(wl, sizeof(*wl));
894 1.2 simonb
895 1.2 simonb return 0;
896 1.2 simonb }
897 1.2 simonb
898 1.71 riastrad /****************************************************************/
899 1.71 riastrad /*
900 1.71 riastrad * Unbuffered disk I/O
901 1.71 riastrad */
902 1.71 riastrad
903 1.95 jdolecek static void
904 1.95 jdolecek wapbl_doio_accounting(struct vnode *devvp, int flags)
905 1.2 simonb {
906 1.2 simonb struct pstats *pstats = curlwp->l_proc->p_stats;
907 1.2 simonb
908 1.2 simonb if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
909 1.45 rmind mutex_enter(devvp->v_interlock);
910 1.2 simonb devvp->v_numoutput++;
911 1.45 rmind mutex_exit(devvp->v_interlock);
912 1.2 simonb pstats->p_ru.ru_oublock++;
913 1.2 simonb } else {
914 1.2 simonb pstats->p_ru.ru_inblock++;
915 1.2 simonb }
916 1.2 simonb
917 1.95 jdolecek }
918 1.95 jdolecek
919 1.95 jdolecek static int
920 1.95 jdolecek wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
921 1.95 jdolecek {
922 1.95 jdolecek struct buf *bp;
923 1.95 jdolecek int error;
924 1.95 jdolecek
925 1.95 jdolecek KASSERT(devvp->v_type == VBLK);
926 1.95 jdolecek
927 1.95 jdolecek wapbl_doio_accounting(devvp, flags);
928 1.95 jdolecek
929 1.2 simonb bp = getiobuf(devvp, true);
930 1.2 simonb bp->b_flags = flags;
931 1.105 ad bp->b_cflags |= BC_BUSY; /* mandatory, asserted by biowait() */
932 1.2 simonb bp->b_dev = devvp->v_rdev;
933 1.2 simonb bp->b_data = data;
934 1.2 simonb bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
935 1.2 simonb bp->b_blkno = pbn;
936 1.52 chs BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
937 1.2 simonb
938 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_IO,
939 1.29 pooka ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%"PRIx64"\n",
940 1.114 riastrad BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
941 1.114 riastrad bp->b_blkno, bp->b_dev));
942 1.2 simonb
943 1.2 simonb VOP_STRATEGY(devvp, bp);
944 1.2 simonb
945 1.2 simonb error = biowait(bp);
946 1.2 simonb putiobuf(bp);
947 1.2 simonb
948 1.2 simonb if (error) {
949 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
950 1.2 simonb ("wapbl_doio: %s %zu bytes at block %" PRId64
951 1.114 riastrad " on dev 0x%"PRIx64" failed with error %d\n",
952 1.114 riastrad (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
953 1.114 riastrad "write" : "read"),
954 1.114 riastrad len, pbn, devvp->v_rdev, error));
955 1.2 simonb }
956 1.2 simonb
957 1.2 simonb return error;
958 1.2 simonb }
959 1.2 simonb
960 1.71 riastrad /*
961 1.71 riastrad * wapbl_write(data, len, devvp, pbn)
962 1.71 riastrad *
963 1.71 riastrad * Synchronously write len bytes from data to physical block pbn
964 1.71 riastrad * on devvp.
965 1.71 riastrad */
966 1.2 simonb int
967 1.2 simonb wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
968 1.2 simonb {
969 1.2 simonb
970 1.2 simonb return wapbl_doio(data, len, devvp, pbn, B_WRITE);
971 1.2 simonb }
972 1.2 simonb
973 1.71 riastrad /*
974 1.71 riastrad * wapbl_read(data, len, devvp, pbn)
975 1.71 riastrad *
976 1.71 riastrad * Synchronously read len bytes into data from physical block pbn
977 1.71 riastrad * on devvp.
978 1.71 riastrad */
979 1.2 simonb int
980 1.2 simonb wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
981 1.2 simonb {
982 1.2 simonb
983 1.2 simonb return wapbl_doio(data, len, devvp, pbn, B_READ);
984 1.2 simonb }
985 1.2 simonb
986 1.71 riastrad /****************************************************************/
987 1.71 riastrad /*
988 1.71 riastrad * Buffered disk writes -- try to coalesce writes and emit
989 1.71 riastrad * MAXPHYS-aligned blocks.
990 1.71 riastrad */
991 1.71 riastrad
992 1.2 simonb /*
993 1.95 jdolecek * wapbl_buffered_write_async(wl, bp)
994 1.95 jdolecek *
995 1.95 jdolecek * Send buffer for asynchronous write.
996 1.95 jdolecek */
997 1.95 jdolecek static void
998 1.95 jdolecek wapbl_buffered_write_async(struct wapbl *wl, struct buf *bp)
999 1.95 jdolecek {
1000 1.114 riastrad
1001 1.95 jdolecek wapbl_doio_accounting(wl->wl_devvp, bp->b_flags);
1002 1.95 jdolecek
1003 1.95 jdolecek KASSERT(TAILQ_FIRST(&wl->wl_iobufs) == bp);
1004 1.95 jdolecek TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
1005 1.95 jdolecek
1006 1.101 jdolecek bp->b_flags |= B_WRITE;
1007 1.105 ad bp->b_cflags |= BC_BUSY; /* mandatory, asserted by biowait() */
1008 1.95 jdolecek bp->b_oflags = 0;
1009 1.95 jdolecek bp->b_bcount = bp->b_resid;
1010 1.95 jdolecek BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
1011 1.95 jdolecek
1012 1.95 jdolecek VOP_STRATEGY(wl->wl_devvp, bp);
1013 1.95 jdolecek
1014 1.95 jdolecek wl->wl_ev_journalwrite.ev_count++;
1015 1.95 jdolecek
1016 1.95 jdolecek TAILQ_INSERT_TAIL(&wl->wl_iobufs_busy, bp, b_wapbllist);
1017 1.95 jdolecek }
1018 1.95 jdolecek
1019 1.95 jdolecek /*
1020 1.71 riastrad * wapbl_buffered_flush(wl)
1021 1.71 riastrad *
1022 1.71 riastrad * Flush any buffered writes from wapbl_buffered_write.
1023 1.54 hannken */
1024 1.54 hannken static int
1025 1.95 jdolecek wapbl_buffered_flush(struct wapbl *wl, bool full)
1026 1.54 hannken {
1027 1.95 jdolecek int error = 0;
1028 1.95 jdolecek struct buf *bp, *bnext;
1029 1.95 jdolecek bool only_done = true, found = false;
1030 1.95 jdolecek
1031 1.95 jdolecek /* if there is outstanding buffered write, send it now */
1032 1.95 jdolecek if ((bp = TAILQ_FIRST(&wl->wl_iobufs)) && bp->b_resid > 0)
1033 1.95 jdolecek wapbl_buffered_write_async(wl, bp);
1034 1.95 jdolecek
1035 1.95 jdolecek /* wait for I/O to complete */
1036 1.95 jdolecek again:
1037 1.95 jdolecek TAILQ_FOREACH_SAFE(bp, &wl->wl_iobufs_busy, b_wapbllist, bnext) {
1038 1.95 jdolecek if (!full && only_done) {
1039 1.95 jdolecek /* skip unfinished */
1040 1.95 jdolecek if (!ISSET(bp->b_oflags, BO_DONE))
1041 1.95 jdolecek continue;
1042 1.95 jdolecek }
1043 1.114 riastrad
1044 1.95 jdolecek if (ISSET(bp->b_oflags, BO_DONE))
1045 1.95 jdolecek wl->wl_ev_jbufs_bio_nowait.ev_count++;
1046 1.95 jdolecek
1047 1.95 jdolecek TAILQ_REMOVE(&wl->wl_iobufs_busy, bp, b_wapbllist);
1048 1.95 jdolecek error = biowait(bp);
1049 1.54 hannken
1050 1.95 jdolecek /* reset for reuse */
1051 1.101 jdolecek bp->b_blkno = bp->b_resid = bp->b_flags = 0;
1052 1.95 jdolecek TAILQ_INSERT_TAIL(&wl->wl_iobufs, bp, b_wapbllist);
1053 1.95 jdolecek found = true;
1054 1.54 hannken
1055 1.95 jdolecek if (!full)
1056 1.95 jdolecek break;
1057 1.95 jdolecek }
1058 1.54 hannken
1059 1.95 jdolecek if (!found && only_done && !TAILQ_EMPTY(&wl->wl_iobufs_busy)) {
1060 1.95 jdolecek only_done = false;
1061 1.95 jdolecek goto again;
1062 1.95 jdolecek }
1063 1.87 jdolecek
1064 1.54 hannken return error;
1065 1.54 hannken }
1066 1.54 hannken
1067 1.54 hannken /*
1068 1.71 riastrad * wapbl_buffered_write(data, len, wl, pbn)
1069 1.71 riastrad *
1070 1.71 riastrad * Write len bytes from data to physical block pbn on
1071 1.71 riastrad * wl->wl_devvp. The write may not complete until
1072 1.71 riastrad * wapbl_buffered_flush.
1073 1.54 hannken */
1074 1.54 hannken static int
1075 1.101 jdolecek wapbl_buffered_write(void *data, size_t len, struct wapbl *wl, daddr_t pbn,
1076 1.101 jdolecek int bflags)
1077 1.54 hannken {
1078 1.54 hannken size_t resid;
1079 1.95 jdolecek struct buf *bp;
1080 1.95 jdolecek
1081 1.95 jdolecek again:
1082 1.95 jdolecek bp = TAILQ_FIRST(&wl->wl_iobufs);
1083 1.95 jdolecek
1084 1.95 jdolecek if (bp == NULL) {
1085 1.95 jdolecek /* No more buffers, wait for any previous I/O to finish. */
1086 1.95 jdolecek wapbl_buffered_flush(wl, false);
1087 1.95 jdolecek
1088 1.95 jdolecek bp = TAILQ_FIRST(&wl->wl_iobufs);
1089 1.95 jdolecek KASSERT(bp != NULL);
1090 1.95 jdolecek }
1091 1.54 hannken
1092 1.54 hannken /*
1093 1.54 hannken * If not adjacent to buffered data flush first. Disk block
1094 1.54 hannken * address is always valid for non-empty buffer.
1095 1.54 hannken */
1096 1.95 jdolecek if ((bp->b_resid > 0 && pbn != bp->b_blkno + btodb(bp->b_resid))) {
1097 1.95 jdolecek wapbl_buffered_write_async(wl, bp);
1098 1.95 jdolecek goto again;
1099 1.54 hannken }
1100 1.95 jdolecek
1101 1.54 hannken /*
1102 1.54 hannken * If this write goes to an empty buffer we have to
1103 1.54 hannken * save the disk block address first.
1104 1.54 hannken */
1105 1.101 jdolecek if (bp->b_blkno == 0) {
1106 1.95 jdolecek bp->b_blkno = pbn;
1107 1.101 jdolecek bp->b_flags |= bflags;
1108 1.101 jdolecek }
1109 1.95 jdolecek
1110 1.54 hannken /*
1111 1.95 jdolecek * Remaining space so this buffer ends on a buffer size boundary.
1112 1.54 hannken *
1113 1.54 hannken * Cannot become less or equal zero as the buffer would have been
1114 1.54 hannken * flushed on the last call then.
1115 1.54 hannken */
1116 1.95 jdolecek resid = bp->b_bufsize - dbtob(bp->b_blkno % btodb(bp->b_bufsize)) -
1117 1.95 jdolecek bp->b_resid;
1118 1.54 hannken KASSERT(resid > 0);
1119 1.54 hannken KASSERT(dbtob(btodb(resid)) == resid);
1120 1.95 jdolecek
1121 1.95 jdolecek if (len < resid)
1122 1.95 jdolecek resid = len;
1123 1.95 jdolecek
1124 1.95 jdolecek memcpy((uint8_t *)bp->b_data + bp->b_resid, data, resid);
1125 1.95 jdolecek bp->b_resid += resid;
1126 1.95 jdolecek
1127 1.54 hannken if (len >= resid) {
1128 1.95 jdolecek /* Just filled the buf, or data did not fit */
1129 1.95 jdolecek wapbl_buffered_write_async(wl, bp);
1130 1.95 jdolecek
1131 1.54 hannken data = (uint8_t *)data + resid;
1132 1.54 hannken len -= resid;
1133 1.95 jdolecek pbn += btodb(resid);
1134 1.95 jdolecek
1135 1.95 jdolecek if (len > 0)
1136 1.95 jdolecek goto again;
1137 1.54 hannken }
1138 1.54 hannken
1139 1.54 hannken return 0;
1140 1.54 hannken }
1141 1.54 hannken
1142 1.54 hannken /*
1143 1.71 riastrad * wapbl_circ_write(wl, data, len, offp)
1144 1.71 riastrad *
1145 1.71 riastrad * Write len bytes from data to the circular queue of wl, starting
1146 1.71 riastrad * at linear byte offset *offp, and returning the new linear byte
1147 1.71 riastrad * offset in *offp.
1148 1.71 riastrad *
1149 1.71 riastrad * If the starting linear byte offset precedes wl->wl_circ_off,
1150 1.71 riastrad * the write instead begins at wl->wl_circ_off. XXX WTF? This
1151 1.71 riastrad * should be a KASSERT, not a conditional.
1152 1.71 riastrad *
1153 1.71 riastrad * The write is buffered in wl and must be flushed with
1154 1.71 riastrad * wapbl_buffered_flush before it will be submitted to the disk.
1155 1.2 simonb */
1156 1.2 simonb static int
1157 1.2 simonb wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
1158 1.2 simonb {
1159 1.2 simonb size_t slen;
1160 1.2 simonb off_t off = *offp;
1161 1.2 simonb int error;
1162 1.34 mlelstv daddr_t pbn;
1163 1.2 simonb
1164 1.114 riastrad KDASSERT(((len >> wl->wl_log_dev_bshift) << wl->wl_log_dev_bshift) ==
1165 1.114 riastrad len);
1166 1.2 simonb
1167 1.2 simonb if (off < wl->wl_circ_off)
1168 1.2 simonb off = wl->wl_circ_off;
1169 1.2 simonb slen = wl->wl_circ_off + wl->wl_circ_size - off;
1170 1.2 simonb if (slen < len) {
1171 1.34 mlelstv pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
1172 1.34 mlelstv #ifdef _KERNEL
1173 1.34 mlelstv pbn = btodb(pbn << wl->wl_log_dev_bshift);
1174 1.34 mlelstv #endif
1175 1.101 jdolecek error = wapbl_buffered_write(data, slen, wl, pbn,
1176 1.101 jdolecek WAPBL_JDATA_FLAGS(wl));
1177 1.2 simonb if (error)
1178 1.2 simonb return error;
1179 1.2 simonb data = (uint8_t *)data + slen;
1180 1.2 simonb len -= slen;
1181 1.2 simonb off = wl->wl_circ_off;
1182 1.2 simonb }
1183 1.34 mlelstv pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
1184 1.34 mlelstv #ifdef _KERNEL
1185 1.34 mlelstv pbn = btodb(pbn << wl->wl_log_dev_bshift);
1186 1.34 mlelstv #endif
1187 1.101 jdolecek error = wapbl_buffered_write(data, len, wl, pbn,
1188 1.101 jdolecek WAPBL_JDATA_FLAGS(wl));
1189 1.2 simonb if (error)
1190 1.2 simonb return error;
1191 1.2 simonb off += len;
1192 1.2 simonb if (off >= wl->wl_circ_off + wl->wl_circ_size)
1193 1.2 simonb off = wl->wl_circ_off;
1194 1.2 simonb *offp = off;
1195 1.2 simonb return 0;
1196 1.2 simonb }
1197 1.2 simonb
1198 1.2 simonb /****************************************************************/
1199 1.71 riastrad /*
1200 1.71 riastrad * WAPBL transactions: entering, adding/removing bufs, and exiting
1201 1.71 riastrad */
1202 1.2 simonb
1203 1.2 simonb int
1204 1.2 simonb wapbl_begin(struct wapbl *wl, const char *file, int line)
1205 1.2 simonb {
1206 1.2 simonb int doflush;
1207 1.2 simonb unsigned lockcount;
1208 1.2 simonb
1209 1.2 simonb KDASSERT(wl);
1210 1.2 simonb
1211 1.2 simonb /*
1212 1.2 simonb * XXX this needs to be made much more sophisticated.
1213 1.2 simonb * perhaps each wapbl_begin could reserve a specified
1214 1.2 simonb * number of buffers and bytes.
1215 1.2 simonb */
1216 1.2 simonb mutex_enter(&wl->wl_mtx);
1217 1.2 simonb lockcount = wl->wl_lock_count;
1218 1.2 simonb doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
1219 1.114 riastrad wl->wl_bufbytes_max / 2) ||
1220 1.114 riastrad ((wl->wl_bufcount + (lockcount * 10)) >
1221 1.114 riastrad wl->wl_bufcount_max / 2) ||
1222 1.114 riastrad (wapbl_transaction_len(wl) > wl->wl_circ_size / 2) ||
1223 1.114 riastrad (wl->wl_dealloccnt >= (wl->wl_dealloclim / 2));
1224 1.2 simonb mutex_exit(&wl->wl_mtx);
1225 1.2 simonb
1226 1.2 simonb if (doflush) {
1227 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1228 1.2 simonb ("force flush lockcnt=%d bufbytes=%zu "
1229 1.114 riastrad "(max=%zu) bufcount=%zu (max=%zu) "
1230 1.114 riastrad "dealloccnt %d (lim=%d)\n",
1231 1.114 riastrad lockcount, wl->wl_bufbytes,
1232 1.114 riastrad wl->wl_bufbytes_max, wl->wl_bufcount,
1233 1.114 riastrad wl->wl_bufcount_max,
1234 1.114 riastrad wl->wl_dealloccnt, wl->wl_dealloclim));
1235 1.2 simonb }
1236 1.2 simonb
1237 1.2 simonb if (doflush) {
1238 1.2 simonb int error = wapbl_flush(wl, 0);
1239 1.2 simonb if (error)
1240 1.2 simonb return error;
1241 1.2 simonb }
1242 1.2 simonb
1243 1.23 ad rw_enter(&wl->wl_rwlock, RW_READER);
1244 1.2 simonb mutex_enter(&wl->wl_mtx);
1245 1.2 simonb wl->wl_lock_count++;
1246 1.2 simonb mutex_exit(&wl->wl_mtx);
1247 1.2 simonb
1248 1.23 ad #if defined(WAPBL_DEBUG_PRINT)
1249 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
1250 1.2 simonb ("wapbl_begin thread %d.%d with bufcount=%zu "
1251 1.114 riastrad "bufbytes=%zu bcount=%zu at %s:%d\n",
1252 1.114 riastrad curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1253 1.114 riastrad wl->wl_bufbytes, wl->wl_bcount, file, line));
1254 1.2 simonb #endif
1255 1.2 simonb
1256 1.2 simonb return 0;
1257 1.2 simonb }
1258 1.2 simonb
1259 1.2 simonb void
1260 1.2 simonb wapbl_end(struct wapbl *wl)
1261 1.2 simonb {
1262 1.2 simonb
1263 1.23 ad #if defined(WAPBL_DEBUG_PRINT)
1264 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
1265 1.114 riastrad ("wapbl_end thread %d.%d with bufcount=%zu "
1266 1.114 riastrad "bufbytes=%zu bcount=%zu\n",
1267 1.114 riastrad curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1268 1.114 riastrad wl->wl_bufbytes, wl->wl_bcount));
1269 1.2 simonb #endif
1270 1.2 simonb
1271 1.65 riastrad /*
1272 1.65 riastrad * XXX this could be handled more gracefully, perhaps place
1273 1.65 riastrad * only a partial transaction in the log and allow the
1274 1.65 riastrad * remaining to flush without the protection of the journal.
1275 1.65 riastrad */
1276 1.67 riastrad KASSERTMSG((wapbl_transaction_len(wl) <=
1277 1.67 riastrad (wl->wl_circ_size - wl->wl_reserved_bytes)),
1278 1.65 riastrad "wapbl_end: current transaction too big to flush");
1279 1.40 bouyer
1280 1.2 simonb mutex_enter(&wl->wl_mtx);
1281 1.2 simonb KASSERT(wl->wl_lock_count > 0);
1282 1.2 simonb wl->wl_lock_count--;
1283 1.2 simonb mutex_exit(&wl->wl_mtx);
1284 1.2 simonb
1285 1.2 simonb rw_exit(&wl->wl_rwlock);
1286 1.2 simonb }
1287 1.2 simonb
1288 1.2 simonb void
1289 1.2 simonb wapbl_add_buf(struct wapbl *wl, struct buf * bp)
1290 1.2 simonb {
1291 1.2 simonb
1292 1.2 simonb KASSERT(bp->b_cflags & BC_BUSY);
1293 1.2 simonb KASSERT(bp->b_vp);
1294 1.2 simonb
1295 1.2 simonb wapbl_jlock_assert(wl);
1296 1.2 simonb
1297 1.2 simonb #if 0
1298 1.2 simonb /*
1299 1.2 simonb * XXX this might be an issue for swapfiles.
1300 1.2 simonb * see uvm_swap.c:1702
1301 1.2 simonb *
1302 1.2 simonb * XXX2 why require it then? leap of semantics?
1303 1.2 simonb */
1304 1.2 simonb KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
1305 1.2 simonb #endif
1306 1.2 simonb
1307 1.2 simonb mutex_enter(&wl->wl_mtx);
1308 1.2 simonb if (bp->b_flags & B_LOCKED) {
1309 1.94 jdolecek TAILQ_REMOVE(&wl->wl_bufs, bp, b_wapbllist);
1310 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
1311 1.114 riastrad ("wapbl_add_buf thread %d.%d re-adding buf %p "
1312 1.114 riastrad "with %d bytes %d bcount\n",
1313 1.114 riastrad curproc->p_pid, curlwp->l_lid, bp,
1314 1.114 riastrad bp->b_bufsize, bp->b_bcount));
1315 1.2 simonb } else {
1316 1.2 simonb /* unlocked by dirty buffers shouldn't exist */
1317 1.2 simonb KASSERT(!(bp->b_oflags & BO_DELWRI));
1318 1.2 simonb wl->wl_bufbytes += bp->b_bufsize;
1319 1.2 simonb wl->wl_bcount += bp->b_bcount;
1320 1.2 simonb wl->wl_bufcount++;
1321 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
1322 1.114 riastrad ("wapbl_add_buf thread %d.%d adding buf %p "
1323 1.114 riastrad "with %d bytes %d bcount\n",
1324 1.114 riastrad curproc->p_pid, curlwp->l_lid, bp,
1325 1.114 riastrad bp->b_bufsize, bp->b_bcount));
1326 1.2 simonb }
1327 1.94 jdolecek TAILQ_INSERT_TAIL(&wl->wl_bufs, bp, b_wapbllist);
1328 1.2 simonb mutex_exit(&wl->wl_mtx);
1329 1.2 simonb
1330 1.2 simonb bp->b_flags |= B_LOCKED;
1331 1.2 simonb }
1332 1.2 simonb
1333 1.2 simonb static void
1334 1.2 simonb wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
1335 1.2 simonb {
1336 1.2 simonb
1337 1.2 simonb KASSERT(mutex_owned(&wl->wl_mtx));
1338 1.2 simonb KASSERT(bp->b_cflags & BC_BUSY);
1339 1.2 simonb wapbl_jlock_assert(wl);
1340 1.2 simonb
1341 1.2 simonb #if 0
1342 1.2 simonb /*
1343 1.2 simonb * XXX this might be an issue for swapfiles.
1344 1.2 simonb * see uvm_swap.c:1725
1345 1.2 simonb *
1346 1.2 simonb * XXXdeux: see above
1347 1.2 simonb */
1348 1.2 simonb KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
1349 1.2 simonb #endif
1350 1.2 simonb KASSERT(bp->b_flags & B_LOCKED);
1351 1.2 simonb
1352 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
1353 1.114 riastrad ("wapbl_remove_buf thread %d.%d removing buf %p with "
1354 1.114 riastrad "%d bytes %d bcount\n",
1355 1.114 riastrad curproc->p_pid, curlwp->l_lid, bp,
1356 1.114 riastrad bp->b_bufsize, bp->b_bcount));
1357 1.2 simonb
1358 1.2 simonb KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
1359 1.2 simonb wl->wl_bufbytes -= bp->b_bufsize;
1360 1.2 simonb KASSERT(wl->wl_bcount >= bp->b_bcount);
1361 1.2 simonb wl->wl_bcount -= bp->b_bcount;
1362 1.2 simonb KASSERT(wl->wl_bufcount > 0);
1363 1.2 simonb wl->wl_bufcount--;
1364 1.2 simonb KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1365 1.2 simonb KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1366 1.94 jdolecek TAILQ_REMOVE(&wl->wl_bufs, bp, b_wapbllist);
1367 1.2 simonb
1368 1.2 simonb bp->b_flags &= ~B_LOCKED;
1369 1.2 simonb }
1370 1.2 simonb
1371 1.2 simonb /* called from brelsel() in vfs_bio among other places */
1372 1.2 simonb void
1373 1.2 simonb wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
1374 1.2 simonb {
1375 1.2 simonb
1376 1.2 simonb mutex_enter(&wl->wl_mtx);
1377 1.2 simonb wapbl_remove_buf_locked(wl, bp);
1378 1.2 simonb mutex_exit(&wl->wl_mtx);
1379 1.2 simonb }
1380 1.2 simonb
1381 1.2 simonb void
1382 1.2 simonb wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
1383 1.2 simonb {
1384 1.2 simonb
1385 1.2 simonb KASSERT(bp->b_cflags & BC_BUSY);
1386 1.2 simonb
1387 1.2 simonb /*
1388 1.2 simonb * XXX: why does this depend on B_LOCKED? otherwise the buf
1389 1.2 simonb * is not for a transaction? if so, why is this called in the
1390 1.2 simonb * first place?
1391 1.2 simonb */
1392 1.2 simonb if (bp->b_flags & B_LOCKED) {
1393 1.2 simonb mutex_enter(&wl->wl_mtx);
1394 1.2 simonb wl->wl_bufbytes += bp->b_bufsize - oldsz;
1395 1.2 simonb wl->wl_bcount += bp->b_bcount - oldcnt;
1396 1.2 simonb mutex_exit(&wl->wl_mtx);
1397 1.2 simonb }
1398 1.2 simonb }
1399 1.2 simonb
1400 1.2 simonb #endif /* _KERNEL */
1401 1.2 simonb
1402 1.2 simonb /****************************************************************/
1403 1.2 simonb /* Some utility inlines */
1404 1.2 simonb
1405 1.71 riastrad /*
1406 1.71 riastrad * wapbl_space_used(avail, head, tail)
1407 1.71 riastrad *
1408 1.71 riastrad * Number of bytes used in a circular queue of avail total bytes,
1409 1.71 riastrad * from tail to head.
1410 1.71 riastrad */
1411 1.56 joerg static inline size_t
1412 1.56 joerg wapbl_space_used(size_t avail, off_t head, off_t tail)
1413 1.56 joerg {
1414 1.56 joerg
1415 1.56 joerg if (tail == 0) {
1416 1.56 joerg KASSERT(head == 0);
1417 1.56 joerg return 0;
1418 1.56 joerg }
1419 1.56 joerg return ((head + (avail - 1) - tail) % avail) + 1;
1420 1.56 joerg }
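
/*
 * Worked example of the formula above (illustrative numbers only):
 * with avail = 100, head = 30 and tail = 70 the used region wraps
 * around the end of the queue: ((30 + 99 - 70) % 100) + 1 = 60 bytes.
 * With head = 70 and tail = 30 there is no wrap:
 * ((70 + 99 - 30) % 100) + 1 = 40 bytes.  A common offset added to
 * both head and tail (such as wl_circ_off) cancels out of the
 * difference, and head == tail != 0 yields avail, i.e. a full queue;
 * the empty queue is represented by head == tail == 0, handled above.
 */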
1421 1.56 joerg
1422 1.56 joerg #ifdef _KERNEL
1423 1.71 riastrad /*
1424 1.71 riastrad * wapbl_advance(size, off, oldoff, delta)
1425 1.71 riastrad *
1426 1.71 riastrad * Given a byte offset oldoff into a circular queue of size bytes
1427 1.71 riastrad * starting at off, return a new byte offset oldoff + delta into
1428 1.71 riastrad * the circular queue.
1429 1.71 riastrad */
1430 1.30 uebayasi static inline off_t
1431 1.60 matt wapbl_advance(size_t size, size_t off, off_t oldoff, size_t delta)
1432 1.2 simonb {
1433 1.60 matt off_t newoff;
1434 1.2 simonb
1435 1.2 simonb /* Define acceptable ranges for inputs. */
1436 1.46 christos KASSERT(delta <= (size_t)size);
1437 1.114 riastrad KASSERT(oldoff == 0 || (size_t)oldoff >= off);
1438 1.60 matt KASSERT(oldoff < (off_t)(size + off));
1439 1.2 simonb
1440 1.114 riastrad if (oldoff == 0 && delta != 0)
1441 1.60 matt newoff = off + delta;
1442 1.114 riastrad else if (oldoff + delta < size + off)
1443 1.60 matt newoff = oldoff + delta;
1444 1.2 simonb else
1445 1.60 matt newoff = (oldoff + delta) - size;
1446 1.2 simonb
1447 1.2 simonb /* Note some interesting axioms */
1448 1.114 riastrad KASSERT(delta != 0 || newoff == oldoff);
1449 1.114 riastrad KASSERT(delta == 0 || newoff != 0);
1450 1.114 riastrad KASSERT(delta != size || newoff == oldoff);
1451 1.2 simonb
1452 1.2 simonb /* Define acceptable ranges for output. */
1453 1.114 riastrad KASSERT(newoff == 0 || (size_t)newoff >= off);
1454 1.114 riastrad KASSERT((size_t)newoff < size + off);
1455 1.60 matt return newoff;
1456 1.2 simonb }
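
/*
 * Worked example (illustrative numbers only): for a circular queue of
 * size = 100 bytes starting at off = 10, i.e. occupying offsets
 * [10, 110):
 *
 *	wapbl_advance(100, 10, 95, 30) == 25	(95 + 30 wraps past 110)
 *	wapbl_advance(100, 10, 0, 5)   == 15	(0 means empty: start at off)
 *	wapbl_advance(100, 10, 40, 0)  == 40	(zero delta is a no-op)
 */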
1457 1.2 simonb
1458 1.71 riastrad /*
1459 1.71 riastrad * wapbl_space_free(avail, head, tail)
1460 1.71 riastrad *
1461 1.71 riastrad * Number of bytes free in a circular queue of avail total bytes,
1462 1.71 riastrad * in which everything from tail to head is used.
1463 1.71 riastrad */
1464 1.30 uebayasi static inline size_t
1465 1.2 simonb wapbl_space_free(size_t avail, off_t head, off_t tail)
1466 1.2 simonb {
1467 1.2 simonb
1468 1.2 simonb return avail - wapbl_space_used(avail, head, tail);
1469 1.2 simonb }
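
/*
 * Continuing the example above (illustrative numbers only): with
 * avail = 100, head = 30 and tail = 70, 60 bytes are used and
 * wapbl_space_free(100, 30, 70) == 40; an empty queue
 * (head == tail == 0) has all 100 bytes free.
 */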
1470 1.2 simonb
1471 1.71 riastrad /*
1472 1.71 riastrad * wapbl_advance_head(size, off, delta, headp, tailp)
1473 1.71 riastrad *
1474 1.71 riastrad * In a circular queue of size bytes starting at off, given the
1475 1.71 riastrad * old head and tail offsets *headp and *tailp, store the new head
1476 1.71 riastrad * and tail offsets in *headp and *tailp resulting from adding
1477 1.71 riastrad * delta bytes of data to the head.
1478 1.71 riastrad */
1479 1.30 uebayasi static inline void
1480 1.2 simonb wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
1481 1.114 riastrad off_t *tailp)
1482 1.2 simonb {
1483 1.2 simonb off_t head = *headp;
1484 1.2 simonb off_t tail = *tailp;
1485 1.2 simonb
1486 1.2 simonb KASSERT(delta <= wapbl_space_free(size, head, tail));
1487 1.2 simonb head = wapbl_advance(size, off, head, delta);
1488 1.114 riastrad if (tail == 0 && head != 0)
1489 1.2 simonb tail = off;
1490 1.2 simonb *headp = head;
1491 1.2 simonb *tailp = tail;
1492 1.2 simonb }
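
/*
 * Worked example (illustrative numbers only): with size = 100 and
 * off = 10, adding delta = 20 bytes to an empty queue
 * (*headp == *tailp == 0) gives head = 30 and tail = 10: the tail is
 * pinned to the start of the circular area once the queue becomes
 * non-empty, and wapbl_space_used(100, 30, 10) == 20 as expected.
 */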
1493 1.2 simonb
1494 1.71 riastrad /*
1495 1.71 riastrad * wapbl_advance_tail(size, off, delta, headp, tailp)
1496 1.71 riastrad *
1497 1.71 riastrad * In a circular queue of size bytes starting at off, given the
1498 1.71 riastrad * old head and tail offsets *headp and *tailp, store the new head
1499 1.71 riastrad * and tail offsets in *headp and *tailp resulting from removing
1500 1.71 riastrad * delta bytes of data from the tail.
1501 1.71 riastrad */
1502 1.30 uebayasi static inline void
1503 1.2 simonb wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
1504 1.114 riastrad off_t *tailp)
1505 1.2 simonb {
1506 1.2 simonb off_t head = *headp;
1507 1.2 simonb off_t tail = *tailp;
1508 1.2 simonb
1509 1.2 simonb KASSERT(delta <= wapbl_space_used(size, head, tail));
1510 1.2 simonb tail = wapbl_advance(size, off, tail, delta);
1511 1.2 simonb if (head == tail) {
1512 1.2 simonb head = tail = 0;
1513 1.2 simonb }
1514 1.2 simonb *headp = head;
1515 1.2 simonb *tailp = tail;
1516 1.2 simonb }
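
/*
 * Worked example (illustrative numbers only): continuing the example
 * above (size = 100, off = 10, head = 30, tail = 10, 20 bytes used),
 * removing delta = 20 bytes advances the tail to 30; head == tail
 * then means the queue is empty, so both are reset to 0.
 */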
1517 1.2 simonb
1518 1.2 simonb
1519 1.2 simonb /****************************************************************/
1520 1.2 simonb
1521 1.2 simonb /*
1522 1.73 riastrad * wapbl_truncate(wl, minfree)
1523 1.71 riastrad *
1524 1.71 riastrad * Wait until at least minfree bytes are available in the log.
1525 1.71 riastrad *
1526 1.73 riastrad * If it was necessary to wait for writes to complete,
1527 1.73 riastrad * advance the circular queue tail to reflect the new write
1528 1.73 riastrad * completions and issue a write commit to the log.
1529 1.71 riastrad *
1530 1.71 riastrad * => Caller must hold wl->wl_rwlock writer lock.
1531 1.2 simonb */
1532 1.2 simonb static int
1533 1.73 riastrad wapbl_truncate(struct wapbl *wl, size_t minfree)
1534 1.2 simonb {
1535 1.2 simonb size_t delta;
1536 1.2 simonb size_t avail;
1537 1.2 simonb off_t head;
1538 1.2 simonb off_t tail;
1539 1.2 simonb int error = 0;
1540 1.2 simonb
1541 1.2 simonb KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
1542 1.2 simonb KASSERT(rw_write_held(&wl->wl_rwlock));
1543 1.2 simonb
1544 1.2 simonb mutex_enter(&wl->wl_mtx);
1545 1.2 simonb
1546 1.2 simonb /*
1547 1.2 simonb * First check to see if we have to do a commit
1548 1.2 simonb * at all.
1549 1.2 simonb */
1550 1.2 simonb avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
1551 1.2 simonb if (minfree < avail) {
1552 1.2 simonb mutex_exit(&wl->wl_mtx);
1553 1.2 simonb return 0;
1554 1.2 simonb }
1555 1.2 simonb minfree -= avail;
1556 1.114 riastrad while (wl->wl_error_count == 0 &&
1557 1.114 riastrad wl->wl_reclaimable_bytes < minfree) {
1558 1.115 riastrad WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1559 1.114 riastrad ("wapbl_truncate: sleeping on %p"
1560 1.114 riastrad " wl=%p bytes=%zd minfree=%zd\n",
1561 1.114 riastrad &wl->wl_reclaimable_bytes,
1562 1.114 riastrad wl, wl->wl_reclaimable_bytes, minfree));
1563 1.2 simonb cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
1564 1.2 simonb }
1565 1.2 simonb if (wl->wl_reclaimable_bytes < minfree) {
1566 1.2 simonb KASSERT(wl->wl_error_count);
1567 1.2 simonb /* XXX maybe get actual error from buffer instead someday? */
1568 1.116 riastrad error = SET_ERROR(EIO);
1569 1.2 simonb }
1570 1.2 simonb head = wl->wl_head;
1571 1.2 simonb tail = wl->wl_tail;
1572 1.2 simonb delta = wl->wl_reclaimable_bytes;
1573 1.2 simonb
1574 1.113 msaitoh /* If all of the entries are flushed, then be sure to keep
1575 1.2 simonb * the reserved bytes reserved. Watch out for discarded transactions,
1576 1.2 simonb * which could leave more bytes reserved than are reclaimable.
1577 1.2 simonb */
1578 1.114 riastrad if (SIMPLEQ_EMPTY(&wl->wl_entries) && delta >= wl->wl_reserved_bytes) {
1579 1.2 simonb delta -= wl->wl_reserved_bytes;
1580 1.2 simonb }
1581 1.2 simonb wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
1582 1.114 riastrad &tail);
1583 1.2 simonb KDASSERT(wl->wl_reserved_bytes <=
1584 1.114 riastrad wapbl_space_used(wl->wl_circ_size, head, tail));
1585 1.2 simonb mutex_exit(&wl->wl_mtx);
1586 1.2 simonb
1587 1.2 simonb if (error)
1588 1.2 simonb return error;
1589 1.2 simonb
1590 1.2 simonb /*
1591 1.2 simonb * This is where head, tail and delta are unprotected
1592 1.2 simonb * from races against itself or flush. This is ok since
1593 1.2 simonb * we only call this routine from inside flush itself.
1594 1.2 simonb *
1595 1.2 simonb * XXX: how can it race against itself when accessed only
1596 1.2 simonb * from behind the write-locked rwlock?
1597 1.2 simonb */
1598 1.2 simonb error = wapbl_write_commit(wl, head, tail);
1599 1.2 simonb if (error)
1600 1.2 simonb return error;
1601 1.2 simonb
1602 1.2 simonb wl->wl_head = head;
1603 1.2 simonb wl->wl_tail = tail;
1604 1.2 simonb
1605 1.2 simonb mutex_enter(&wl->wl_mtx);
1606 1.2 simonb KASSERT(wl->wl_reclaimable_bytes >= delta);
1607 1.2 simonb wl->wl_reclaimable_bytes -= delta;
1608 1.2 simonb mutex_exit(&wl->wl_mtx);
1609 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1610 1.2 simonb ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
1611 1.114 riastrad curproc->p_pid, curlwp->l_lid, delta));
1612 1.2 simonb
1613 1.2 simonb return 0;
1614 1.2 simonb }
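
/*
 * Worked example of the tail adjustment above (illustrative numbers
 * only): suppose wl_reclaimable_bytes = 5000, wl_reserved_bytes = 1024
 * and wl_entries is empty.  Then delta = 5000 - 1024 = 3976, the tail
 * advances by 3976 bytes, and at least 1024 bytes remain used between
 * tail and head, satisfying the KDASSERT above.  If some entries are
 * still outstanding, the full 5000 bytes are reclaimed instead.
 */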
1615 1.2 simonb
1616 1.2 simonb /****************************************************************/
1617 1.2 simonb
1618 1.2 simonb void
1619 1.2 simonb wapbl_biodone(struct buf *bp)
1620 1.2 simonb {
1621 1.2 simonb struct wapbl_entry *we = bp->b_private;
1622 1.107 jdolecek struct wapbl *wl;
1623 1.53 hannken #ifdef WAPBL_DEBUG_BUFBYTES
1624 1.53 hannken const int bufsize = bp->b_bufsize;
1625 1.53 hannken #endif
1626 1.2 simonb
1627 1.107 jdolecek mutex_enter(&bufcache_lock);
1628 1.107 jdolecek wl = we->we_wapbl;
1629 1.107 jdolecek mutex_exit(&bufcache_lock);
1630 1.107 jdolecek
1631 1.2 simonb /*
1632 1.2 simonb * Handle possible flushing of buffers after log has been
1633 1.2 simonb * decommissioned.
1634 1.2 simonb */
1635 1.2 simonb if (!wl) {
1636 1.2 simonb KASSERT(we->we_bufcount > 0);
1637 1.2 simonb we->we_bufcount--;
1638 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1639 1.53 hannken KASSERT(we->we_unsynced_bufbytes >= bufsize);
1640 1.53 hannken we->we_unsynced_bufbytes -= bufsize;
1641 1.2 simonb #endif
1642 1.2 simonb
1643 1.2 simonb if (we->we_bufcount == 0) {
1644 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1645 1.2 simonb KASSERT(we->we_unsynced_bufbytes == 0);
1646 1.2 simonb #endif
1647 1.51 para pool_put(&wapbl_entry_pool, we);
1648 1.2 simonb }
1649 1.2 simonb
1650 1.2 simonb brelse(bp, 0);
1651 1.2 simonb return;
1652 1.2 simonb }
1653 1.2 simonb
1654 1.2 simonb #ifdef ohbother
1655 1.44 uebayasi KDASSERT(bp->b_oflags & BO_DONE);
1656 1.44 uebayasi KDASSERT(!(bp->b_oflags & BO_DELWRI));
1657 1.2 simonb KDASSERT(bp->b_flags & B_ASYNC);
1658 1.44 uebayasi KDASSERT(bp->b_cflags & BC_BUSY);
1659 1.2 simonb KDASSERT(!(bp->b_flags & B_LOCKED));
1660 1.2 simonb KDASSERT(!(bp->b_flags & B_READ));
1661 1.44 uebayasi KDASSERT(!(bp->b_cflags & BC_INVAL));
1662 1.44 uebayasi KDASSERT(!(bp->b_cflags & BC_NOCACHE));
1663 1.2 simonb #endif
1664 1.2 simonb
1665 1.2 simonb if (bp->b_error) {
1666 1.26 apb /*
1667 1.78 riastrad * If an error occurs, it would be nice to leave the buffer
1668 1.78 riastrad * as a delayed write on the LRU queue so that we can retry
1669 1.78 riastrad * it later. But buffercache(9) can't handle dirty buffer
1670 1.78 riastrad * reuse, so just mark the log permanently errored out.
1671 1.26 apb */
1672 1.2 simonb mutex_enter(&wl->wl_mtx);
1673 1.2 simonb if (wl->wl_error_count == 0) {
1674 1.2 simonb wl->wl_error_count++;
1675 1.2 simonb cv_broadcast(&wl->wl_reclaimable_cv);
1676 1.2 simonb }
1677 1.2 simonb mutex_exit(&wl->wl_mtx);
1678 1.2 simonb }
1679 1.2 simonb
1680 1.53 hannken /*
1681 1.93 jdolecek * Make sure that the buf doesn't retain the media flags, so that
1682 1.93 jdolecek * e.g. wapbl_allow_fuadpo has immediate effect on any following I/O.
1683 1.93 jdolecek * The flags will be set again if needed by another I/O.
1684 1.93 jdolecek */
1685 1.93 jdolecek bp->b_flags &= ~B_MEDIA_FLAGS;
1686 1.93 jdolecek
1687 1.93 jdolecek /*
1688 1.53 hannken * Release the buffer here. wapbl_flush() may wait for the
1689 1.53 hannken * log to become empty and we better unbusy the buffer before
1690 1.53 hannken * wapbl_flush() returns.
1691 1.53 hannken */
1692 1.53 hannken brelse(bp, 0);
1693 1.53 hannken
1694 1.2 simonb mutex_enter(&wl->wl_mtx);
1695 1.2 simonb
1696 1.2 simonb KASSERT(we->we_bufcount > 0);
1697 1.2 simonb we->we_bufcount--;
1698 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1699 1.53 hannken KASSERT(we->we_unsynced_bufbytes >= bufsize);
1700 1.53 hannken we->we_unsynced_bufbytes -= bufsize;
1701 1.53 hannken KASSERT(wl->wl_unsynced_bufbytes >= bufsize);
1702 1.53 hannken wl->wl_unsynced_bufbytes -= bufsize;
1703 1.2 simonb #endif
1704 1.87 jdolecek wl->wl_ev_metawrite.ev_count++;
1705 1.2 simonb
1706 1.2 simonb /*
1707 1.2 simonb * If the current transaction can be reclaimed, start
1708 1.2 simonb * at the beginning and reclaim any consecutive reclaimable
1709 1.2 simonb * transactions. If we successfully reclaim anything,
1710 1.2 simonb * then wakeup anyone waiting for the reclaim.
1711 1.2 simonb */
1712 1.2 simonb if (we->we_bufcount == 0) {
1713 1.2 simonb size_t delta = 0;
1714 1.2 simonb int errcnt = 0;
1715 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1716 1.2 simonb KDASSERT(we->we_unsynced_bufbytes == 0);
1717 1.2 simonb #endif
1718 1.2 simonb /*
1719 1.2 simonb * clear any posted error, since the buffer it came from
1720 1.2 simonb * has been successfully flushed by now
1721 1.2 simonb */
1722 1.2 simonb while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
1723 1.114 riastrad we->we_bufcount == 0) {
1724 1.2 simonb delta += we->we_reclaimable_bytes;
1725 1.2 simonb if (we->we_error)
1726 1.2 simonb errcnt++;
1727 1.2 simonb SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
1728 1.51 para pool_put(&wapbl_entry_pool, we);
1729 1.2 simonb }
1730 1.2 simonb
1731 1.2 simonb if (delta) {
1732 1.2 simonb wl->wl_reclaimable_bytes += delta;
1733 1.2 simonb KASSERT(wl->wl_error_count >= errcnt);
1734 1.2 simonb wl->wl_error_count -= errcnt;
1735 1.2 simonb cv_broadcast(&wl->wl_reclaimable_cv);
1736 1.2 simonb }
1737 1.2 simonb }
1738 1.2 simonb
1739 1.2 simonb mutex_exit(&wl->wl_mtx);
1740 1.2 simonb }
1741 1.2 simonb
1742 1.2 simonb /*
1743 1.71 riastrad * wapbl_flush(wl, wait)
1744 1.71 riastrad *
1745 1.71 riastrad * Flush pending block writes, deallocations, and inodes from
1746 1.71 riastrad * the current transaction in memory to the log on disk:
1747 1.71 riastrad *
1748 1.71 riastrad * 1. Call the file system's wl_flush callback to flush any
1749 1.71 riastrad * per-file-system pending updates.
1750 1.71 riastrad * 2. Wait for enough space in the log for the current transaction.
1751 1.71 riastrad * 3. Synchronously write the new log records, advancing the
1752 1.71 riastrad * circular queue head.
1753 1.77 riastrad * 4. Issue the pending block writes asynchronously, now that they
1754 1.77 riastrad * are recorded in the log and can be replayed after crash.
1755 1.77 riastrad * 5. If wait is true, wait for all writes to complete and for the
1756 1.77 riastrad * log to become empty.
1757 1.71 riastrad *
1758 1.71 riastrad * On failure, call the file system's wl_flush_abort callback.
1759 1.2 simonb */
1760 1.2 simonb int
1761 1.2 simonb wapbl_flush(struct wapbl *wl, int waitfor)
1762 1.2 simonb {
1763 1.2 simonb struct buf *bp;
1764 1.2 simonb struct wapbl_entry *we;
1765 1.2 simonb off_t off;
1766 1.2 simonb off_t head;
1767 1.2 simonb off_t tail;
1768 1.2 simonb size_t delta = 0;
1769 1.2 simonb size_t flushsize;
1770 1.2 simonb size_t reserved;
1771 1.2 simonb int error = 0;
1772 1.2 simonb
1773 1.2 simonb /*
1774 1.2 simonb * Do a quick check to see if a full flush can be skipped.
1775 1.2 simonb * This assumes that the flush callback does not need to be called
1776 1.2 simonb * unless there are other outstanding bufs.
1777 1.2 simonb */
1778 1.2 simonb if (!waitfor) {
1779 1.2 simonb size_t nbufs;
1780 1.2 simonb mutex_enter(&wl->wl_mtx); /* XXX need mutex here to
1781 1.2 simonb protect the KASSERTS */
1782 1.2 simonb nbufs = wl->wl_bufcount;
1783 1.2 simonb KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1784 1.2 simonb KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1785 1.2 simonb mutex_exit(&wl->wl_mtx);
1786 1.2 simonb if (nbufs == 0)
1787 1.2 simonb return 0;
1788 1.2 simonb }
1789 1.2 simonb
1790 1.2 simonb /*
1791 1.2 simonb * XXX we may consider using LK_UPGRADE here
1792 1.2 simonb * if we want to call flush from inside a transaction
1793 1.2 simonb */
1794 1.2 simonb rw_enter(&wl->wl_rwlock, RW_WRITER);
1795 1.86 jdolecek wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));
1796 1.2 simonb
1797 1.2 simonb /*
1798 1.75 riastrad * Now that we are exclusively locked and the file system has
1799 1.75 riastrad * issued any deferred block writes for this transaction, check
1800 1.75 riastrad * whether there are any blocks to write to the log. If not,
1801 1.75 riastrad * skip waiting for space or writing any log entries.
1802 1.75 riastrad *
1803 1.75 riastrad * XXX Shouldn't this also check wl_dealloccnt and
1804 1.75 riastrad * wl_inohashcnt? Perhaps wl_dealloccnt doesn't matter if the
1805 1.75 riastrad * file system didn't produce any blocks as a consequence of
1806 1.75 riastrad * it, but the same does not seem to be so of wl_inohashcnt.
1807 1.2 simonb */
1808 1.2 simonb if (wl->wl_bufcount == 0) {
1809 1.69 riastrad goto wait_out;
1810 1.2 simonb }
1811 1.2 simonb
1812 1.2 simonb #if 0
1813 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1814 1.114 riastrad ("wapbl_flush thread %d.%d flushing entries with "
1815 1.114 riastrad "bufcount=%zu bufbytes=%zu\n",
1816 1.114 riastrad curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1817 1.114 riastrad wl->wl_bufbytes));
1818 1.2 simonb #endif
1819 1.2 simonb
1820 1.2 simonb /* Calculate amount of space needed to flush */
1821 1.2 simonb flushsize = wapbl_transaction_len(wl);
1822 1.39 christos if (wapbl_verbose_commit) {
1823 1.39 christos struct timespec ts;
1824 1.39 christos getnanotime(&ts);
1825 1.43 nakayama printf("%s: %lld.%09ld this transaction = %zu bytes\n",
1826 1.39 christos __func__, (long long)ts.tv_sec,
1827 1.39 christos (long)ts.tv_nsec, flushsize);
1828 1.39 christos }
1829 1.2 simonb
1830 1.2 simonb if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1831 1.2 simonb /*
1832 1.2 simonb * XXX this could be handled more gracefully, perhaps place
1833 1.2 simonb * only a partial transaction in the log and allow the
1834 1.2 simonb * remaining to flush without the protection of the journal.
1835 1.2 simonb */
1836 1.66 riastrad panic("wapbl_flush: current transaction too big to flush");
1837 1.2 simonb }
1838 1.2 simonb
1839 1.73 riastrad error = wapbl_truncate(wl, flushsize);
1840 1.2 simonb if (error)
1841 1.69 riastrad goto out;
1842 1.2 simonb
1843 1.2 simonb off = wl->wl_head;
1844 1.114 riastrad KASSERT(off == 0 || off >= wl->wl_circ_off);
1845 1.114 riastrad KASSERT(off == 0 || off < wl->wl_circ_off + wl->wl_circ_size);
1846 1.2 simonb error = wapbl_write_blocks(wl, &off);
1847 1.2 simonb if (error)
1848 1.69 riastrad goto out;
1849 1.2 simonb error = wapbl_write_revocations(wl, &off);
1850 1.2 simonb if (error)
1851 1.69 riastrad goto out;
1852 1.2 simonb error = wapbl_write_inodes(wl, &off);
1853 1.2 simonb if (error)
1854 1.69 riastrad goto out;
1855 1.2 simonb
1856 1.2 simonb reserved = 0;
1857 1.2 simonb if (wl->wl_inohashcnt)
1858 1.2 simonb reserved = wapbl_transaction_inodes_len(wl);
1859 1.2 simonb
1860 1.2 simonb head = wl->wl_head;
1861 1.2 simonb tail = wl->wl_tail;
1862 1.2 simonb
1863 1.2 simonb wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1864 1.2 simonb &head, &tail);
1865 1.72 riastrad
1866 1.72 riastrad KASSERTMSG(head == off,
1867 1.72 riastrad "lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1868 1.72 riastrad " off=%"PRIdMAX" flush=%zu",
1869 1.72 riastrad (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1870 1.72 riastrad flushsize);
1871 1.2 simonb
1872 1.2 simonb /* Opportunistically move the tail forward if we can */
1873 1.73 riastrad mutex_enter(&wl->wl_mtx);
1874 1.73 riastrad delta = wl->wl_reclaimable_bytes;
1875 1.73 riastrad mutex_exit(&wl->wl_mtx);
1876 1.73 riastrad wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1877 1.73 riastrad &head, &tail);
1878 1.2 simonb
1879 1.2 simonb error = wapbl_write_commit(wl, head, tail);
1880 1.2 simonb if (error)
1881 1.69 riastrad goto out;
1882 1.2 simonb
1883 1.51 para we = pool_get(&wapbl_entry_pool, PR_WAITOK);
1884 1.2 simonb
1885 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1886 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1887 1.114 riastrad ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1888 1.114 riastrad " unsynced=%zu"
1889 1.114 riastrad "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1890 1.114 riastrad "inodes=%d\n",
1891 1.114 riastrad curproc->p_pid, curlwp->l_lid, flushsize, delta,
1892 1.114 riastrad wapbl_space_used(wl->wl_circ_size, head, tail),
1893 1.114 riastrad wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1894 1.114 riastrad wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1895 1.114 riastrad wl->wl_inohashcnt));
1896 1.2 simonb #else
1897 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1898 1.114 riastrad ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1899 1.114 riastrad "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1900 1.114 riastrad "inodes=%d\n",
1901 1.114 riastrad curproc->p_pid, curlwp->l_lid, flushsize, delta,
1902 1.114 riastrad wapbl_space_used(wl->wl_circ_size, head, tail),
1903 1.114 riastrad wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1904 1.114 riastrad wl->wl_dealloccnt, wl->wl_inohashcnt));
1905 1.2 simonb #endif
1906 1.2 simonb
1907 1.2 simonb
1908 1.2 simonb mutex_enter(&bufcache_lock);
1909 1.2 simonb mutex_enter(&wl->wl_mtx);
1910 1.2 simonb
1911 1.2 simonb wl->wl_reserved_bytes = reserved;
1912 1.2 simonb wl->wl_head = head;
1913 1.2 simonb wl->wl_tail = tail;
1914 1.2 simonb KASSERT(wl->wl_reclaimable_bytes >= delta);
1915 1.2 simonb wl->wl_reclaimable_bytes -= delta;
1916 1.81 jdolecek KDASSERT(wl->wl_dealloccnt == 0);
1917 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1918 1.2 simonb wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1919 1.2 simonb #endif
1920 1.2 simonb
1921 1.2 simonb we->we_wapbl = wl;
1922 1.2 simonb we->we_bufcount = wl->wl_bufcount;
1923 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1924 1.2 simonb we->we_unsynced_bufbytes = wl->wl_bufbytes;
1925 1.2 simonb #endif
1926 1.2 simonb we->we_reclaimable_bytes = flushsize;
1927 1.2 simonb we->we_error = 0;
1928 1.2 simonb SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1929 1.2 simonb
1930 1.2 simonb /*
1931 1.94 jdolecek * This flushes bufs in the order they were queued, so the LRU
1932 1.94 jdolecek * order is preserved.
1933 1.2 simonb */
1934 1.94 jdolecek while ((bp = TAILQ_FIRST(&wl->wl_bufs)) != NULL) {
1935 1.2 simonb if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1936 1.2 simonb continue;
1937 1.2 simonb }
1938 1.2 simonb bp->b_iodone = wapbl_biodone;
1939 1.2 simonb bp->b_private = we;
1940 1.93 jdolecek
1941 1.2 simonb bremfree(bp);
1942 1.2 simonb wapbl_remove_buf_locked(wl, bp);
1943 1.2 simonb mutex_exit(&wl->wl_mtx);
1944 1.2 simonb mutex_exit(&bufcache_lock);
1945 1.2 simonb bawrite(bp);
1946 1.2 simonb mutex_enter(&bufcache_lock);
1947 1.2 simonb mutex_enter(&wl->wl_mtx);
1948 1.2 simonb }
1949 1.2 simonb mutex_exit(&wl->wl_mtx);
1950 1.2 simonb mutex_exit(&bufcache_lock);
1951 1.2 simonb
1952 1.2 simonb #if 0
1953 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1954 1.114 riastrad ("wapbl_flush thread %d.%d done flushing entries...\n",
1955 1.114 riastrad curproc->p_pid, curlwp->l_lid));
1956 1.2 simonb #endif
1957 1.2 simonb
1958 1.114 riastrad wait_out:
1959 1.2 simonb
1960 1.2 simonb /*
1961 1.2 simonb * If the waitfor flag is set, don't return until everything is
1962 1.2 simonb * fully flushed and the on disk log is empty.
1963 1.2 simonb */
1964 1.2 simonb if (waitfor) {
1965 1.91 riastrad error = wapbl_truncate(wl, wl->wl_circ_size -
1966 1.114 riastrad wl->wl_reserved_bytes);
1967 1.2 simonb }
1968 1.2 simonb
1969 1.114 riastrad out:
1970 1.2 simonb if (error) {
1971 1.81 jdolecek wl->wl_flush_abort(wl->wl_mount,
1972 1.86 jdolecek TAILQ_FIRST(&wl->wl_dealloclist));
1973 1.2 simonb }
1974 1.2 simonb
1975 1.2 simonb #ifdef WAPBL_DEBUG_PRINT
1976 1.2 simonb if (error) {
1977 1.2 simonb pid_t pid = -1;
1978 1.2 simonb lwpid_t lid = -1;
1979 1.2 simonb if (curproc)
1980 1.2 simonb pid = curproc->p_pid;
1981 1.2 simonb if (curlwp)
1982 1.2 simonb lid = curlwp->l_lid;
1983 1.2 simonb mutex_enter(&wl->wl_mtx);
1984 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1985 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1986 1.2 simonb ("wapbl_flush: thread %d.%d aborted flush: "
1987 1.114 riastrad "error = %d\n"
1988 1.114 riastrad "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1989 1.114 riastrad "deallocs=%d inodes=%d\n"
1990 1.114 riastrad "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1991 1.114 riastrad "unsynced=%zu\n",
1992 1.114 riastrad pid, lid, error, wl->wl_bufcount,
1993 1.114 riastrad wl->wl_bufbytes, wl->wl_bcount,
1994 1.114 riastrad wl->wl_dealloccnt, wl->wl_inohashcnt,
1995 1.114 riastrad wl->wl_error_count, wl->wl_reclaimable_bytes,
1996 1.114 riastrad wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1997 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1998 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1999 1.2 simonb ("\tentry: bufcount = %zu, reclaimable = %zu, "
2000 1.114 riastrad "error = %d, unsynced = %zu\n",
2001 1.114 riastrad we->we_bufcount, we->we_reclaimable_bytes,
2002 1.114 riastrad we->we_error, we->we_unsynced_bufbytes));
2003 1.2 simonb }
2004 1.2 simonb #else
2005 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2006 1.2 simonb ("wapbl_flush: thread %d.%d aborted flush: "
2007 1.114 riastrad "error = %d\n"
2008 1.114 riastrad "\tbufcount=%zu bufbytes=%zu bcount=%zu "
2009 1.114 riastrad "deallocs=%d inodes=%d\n"
2010 1.114 riastrad "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
2011 1.114 riastrad pid, lid, error, wl->wl_bufcount,
2012 1.114 riastrad wl->wl_bufbytes, wl->wl_bcount,
2013 1.114 riastrad wl->wl_dealloccnt, wl->wl_inohashcnt,
2014 1.114 riastrad wl->wl_error_count, wl->wl_reclaimable_bytes,
2015 1.114 riastrad wl->wl_reserved_bytes));
2016 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
2017 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2018 1.2 simonb ("\tentry: bufcount = %zu, reclaimable = %zu, "
2019 1.114 riastrad "error = %d\n", we->we_bufcount,
2020 1.114 riastrad we->we_reclaimable_bytes, we->we_error));
2021 1.2 simonb }
2022 1.2 simonb #endif
2023 1.2 simonb mutex_exit(&wl->wl_mtx);
2024 1.2 simonb }
2025 1.2 simonb #endif
2026 1.2 simonb
2027 1.2 simonb rw_exit(&wl->wl_rwlock);
2028 1.2 simonb return error;
2029 1.2 simonb }
2030 1.2 simonb
2031 1.2 simonb /****************************************************************/
2032 1.2 simonb
2033 1.2 simonb void
2034 1.2 simonb wapbl_jlock_assert(struct wapbl *wl)
2035 1.2 simonb {
2036 1.2 simonb
2037 1.23 ad KASSERT(rw_lock_held(&wl->wl_rwlock));
2038 1.2 simonb }
2039 1.2 simonb
2040 1.2 simonb void
2041 1.2 simonb wapbl_junlock_assert(struct wapbl *wl)
2042 1.2 simonb {
2043 1.2 simonb
2044 1.2 simonb KASSERT(!rw_write_held(&wl->wl_rwlock));
2045 1.2 simonb }
2046 1.2 simonb
2047 1.2 simonb /****************************************************************/
2048 1.2 simonb
2049 1.2 simonb /* locks missing */
2050 1.2 simonb void
2051 1.114 riastrad wapbl_print(struct wapbl *wl, int full, void (*pr)(const char *, ...))
2052 1.2 simonb {
2053 1.2 simonb struct buf *bp;
2054 1.2 simonb struct wapbl_entry *we;
2055 1.2 simonb (*pr)("wapbl %p", wl);
2056 1.2 simonb (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
2057 1.114 riastrad wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
2058 1.114 riastrad (*pr)("circ = %zu, header = %zu,"
2059 1.114 riastrad " head = %"PRIdMAX" tail = %"PRIdMAX"\n",
2060 1.114 riastrad wl->wl_circ_size, wl->wl_circ_off,
2061 1.114 riastrad (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
2062 1.2 simonb (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
2063 1.114 riastrad wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
2064 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
2065 1.2 simonb (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
2066 1.114 riastrad "reserved = %zu errcnt = %d unsynced = %zu\n",
2067 1.114 riastrad wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
2068 1.114 riastrad wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
2069 1.114 riastrad wl->wl_error_count, wl->wl_unsynced_bufbytes);
2070 1.2 simonb #else
2071 1.2 simonb (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
2072 1.114 riastrad "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
2073 1.114 riastrad wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
2074 1.114 riastrad wl->wl_error_count);
2075 1.2 simonb #endif
2076 1.2 simonb (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
2077 1.114 riastrad wl->wl_dealloccnt, wl->wl_dealloclim);
2078 1.2 simonb (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
2079 1.114 riastrad wl->wl_inohashcnt, wl->wl_inohashmask);
2080 1.2 simonb (*pr)("entries:\n");
2081 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
2082 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
2083 1.2 simonb (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
2084 1.114 riastrad "unsynced = %zu\n",
2085 1.114 riastrad we->we_bufcount, we->we_reclaimable_bytes,
2086 1.114 riastrad we->we_error, we->we_unsynced_bufbytes);
2087 1.2 simonb #else
2088 1.2 simonb (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
2089 1.114 riastrad we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
2090 1.2 simonb #endif
2091 1.2 simonb }
2092 1.2 simonb if (full) {
2093 1.2 simonb int cnt = 0;
2094 1.2 simonb (*pr)("bufs =");
2095 1.94 jdolecek TAILQ_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
2096 1.94 jdolecek if (!TAILQ_NEXT(bp, b_wapbllist)) {
2097 1.2 simonb (*pr)(" %p", bp);
2098 1.2 simonb } else if ((++cnt % 6) == 0) {
2099 1.2 simonb (*pr)(" %p,\n\t", bp);
2100 1.2 simonb } else {
2101 1.2 simonb (*pr)(" %p,", bp);
2102 1.2 simonb }
2103 1.2 simonb }
2104 1.2 simonb (*pr)("\n");
2105 1.2 simonb
2106 1.2 simonb (*pr)("dealloced blks = ");
2107 1.2 simonb {
2108 1.81 jdolecek struct wapbl_dealloc *wd;
2109 1.2 simonb cnt = 0;
2110 1.86 jdolecek TAILQ_FOREACH(wd, &wl->wl_dealloclist, wd_entries) {
2111 1.2 simonb (*pr)(" %"PRId64":%d,",
2112 1.114 riastrad wd->wd_blkno,
2113 1.114 riastrad wd->wd_len);
2114 1.2 simonb if ((++cnt % 4) == 0) {
2115 1.2 simonb (*pr)("\n\t");
2116 1.2 simonb }
2117 1.2 simonb }
2118 1.2 simonb }
2119 1.2 simonb (*pr)("\n");
2120 1.2 simonb
2121 1.2 simonb (*pr)("registered inodes = ");
2122 1.2 simonb {
2123 1.2 simonb int i;
2124 1.2 simonb cnt = 0;
2125 1.2 simonb for (i = 0; i <= wl->wl_inohashmask; i++) {
2126 1.2 simonb struct wapbl_ino_head *wih;
2127 1.2 simonb struct wapbl_ino *wi;
2128 1.2 simonb
2129 1.2 simonb wih = &wl->wl_inohash[i];
2130 1.2 simonb LIST_FOREACH(wi, wih, wi_hash) {
2131 1.2 simonb if (wi->wi_ino == 0)
2132 1.2 simonb continue;
2133 1.55 christos (*pr)(" %"PRIu64"/0%06"PRIo32",",
2134 1.2 simonb wi->wi_ino, wi->wi_mode);
2135 1.2 simonb if ((++cnt % 4) == 0) {
2136 1.2 simonb (*pr)("\n\t");
2137 1.2 simonb }
2138 1.2 simonb }
2139 1.2 simonb }
2140 1.2 simonb (*pr)("\n");
2141 1.2 simonb }
2142 1.95 jdolecek
2143 1.95 jdolecek (*pr)("iobufs free =");
2144 1.95 jdolecek TAILQ_FOREACH(bp, &wl->wl_iobufs, b_wapbllist) {
2145 1.95 jdolecek if (!TAILQ_NEXT(bp, b_wapbllist)) {
2146 1.95 jdolecek (*pr)(" %p", bp);
2147 1.95 jdolecek } else if ((++cnt % 6) == 0) {
2148 1.95 jdolecek (*pr)(" %p,\n\t", bp);
2149 1.95 jdolecek } else {
2150 1.95 jdolecek (*pr)(" %p,", bp);
2151 1.95 jdolecek }
2152 1.95 jdolecek }
2153 1.95 jdolecek (*pr)("\n");
2154 1.95 jdolecek
2155 1.95 jdolecek (*pr)("iobufs busy =");
2156 1.95 jdolecek TAILQ_FOREACH(bp, &wl->wl_iobufs_busy, b_wapbllist) {
2157 1.95 jdolecek if (!TAILQ_NEXT(bp, b_wapbllist)) {
2158 1.95 jdolecek (*pr)(" %p", bp);
2159 1.95 jdolecek } else if ((++cnt % 6) == 0) {
2160 1.95 jdolecek (*pr)(" %p,\n\t", bp);
2161 1.95 jdolecek } else {
2162 1.95 jdolecek (*pr)(" %p,", bp);
2163 1.95 jdolecek }
2164 1.95 jdolecek }
2165 1.95 jdolecek (*pr)("\n");
2166 1.2 simonb }
2167 1.2 simonb }
2168 1.2 simonb
2169 1.2 simonb #if defined(WAPBL_DEBUG) || defined(DDB)
2170 1.2 simonb void
2171 1.2 simonb wapbl_dump(struct wapbl *wl)
2172 1.2 simonb {
2173 1.2 simonb #if defined(WAPBL_DEBUG)
2174 1.2 simonb if (!wl)
2175 1.2 simonb wl = wapbl_debug_wl;
2176 1.2 simonb #endif
2177 1.2 simonb if (!wl)
2178 1.2 simonb return;
2179 1.100 joerg wapbl_print(wl, 1, printf);
2180 1.2 simonb }
2181 1.2 simonb #endif
2182 1.2 simonb
2183 1.2 simonb /****************************************************************/
2184 1.2 simonb
2185 1.85 jdolecek int
2186 1.86 jdolecek wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len, bool force,
2187 1.86 jdolecek void **cookiep)
2188 1.2 simonb {
2189 1.81 jdolecek struct wapbl_dealloc *wd;
2190 1.85 jdolecek int error = 0;
2191 1.2 simonb
2192 1.2 simonb wapbl_jlock_assert(wl);
2193 1.2 simonb
2194 1.38 hannken mutex_enter(&wl->wl_mtx);
2195 1.85 jdolecek
2196 1.85 jdolecek if (__predict_false(wl->wl_dealloccnt >= wl->wl_dealloclim)) {
2197 1.85 jdolecek if (!force) {
2198 1.116 riastrad error = SET_ERROR(EAGAIN);
2199 1.85 jdolecek goto out;
2200 1.85 jdolecek }
2201 1.85 jdolecek
2202 1.85 jdolecek /*
2203 1.85 jdolecek * Forced registration can only be used when:
2204 1.85 jdolecek * 1) the caller can't cope with failure
2205 1.85 jdolecek * 2) the path can be triggered only a bounded, small
2206 1.85 jdolecek * number of times per transaction
2207 1.85 jdolecek * If this is not fulfilled, and the path is triggered
2208 1.85 jdolecek * many times, it could overflow the maximum transaction
2209 1.85 jdolecek * size and panic later.
2210 1.85 jdolecek */
2211 1.114 riastrad printf("%s: forced dealloc registration over limit:"
2212 1.114 riastrad " %d >= %d\n",
2213 1.114 riastrad wl->wl_mount->mnt_stat.f_mntonname,
2214 1.114 riastrad wl->wl_dealloccnt, wl->wl_dealloclim);
2215 1.85 jdolecek }
2216 1.27 pooka
2217 1.84 jdolecek wl->wl_dealloccnt++;
2218 1.84 jdolecek mutex_exit(&wl->wl_mtx);
2219 1.84 jdolecek
2220 1.81 jdolecek wd = pool_get(&wapbl_dealloc_pool, PR_WAITOK);
2221 1.81 jdolecek wd->wd_blkno = blk;
2222 1.81 jdolecek wd->wd_len = len;
2223 1.81 jdolecek
2224 1.84 jdolecek mutex_enter(&wl->wl_mtx);
2225 1.86 jdolecek TAILQ_INSERT_TAIL(&wl->wl_dealloclist, wd, wd_entries);
2226 1.86 jdolecek
2227 1.86 jdolecek if (cookiep)
2228 1.86 jdolecek *cookiep = wd;
2229 1.85 jdolecek
2230 1.114 riastrad out:
2231 1.84 jdolecek mutex_exit(&wl->wl_mtx);
2232 1.81 jdolecek
2233 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
2234 1.85 jdolecek ("wapbl_register_deallocation: blk=%"PRId64" len=%d error=%d\n",
2235 1.114 riastrad blk, len, error));
2236 1.85 jdolecek
2237 1.85 jdolecek return error;
2238 1.2 simonb }
2239 1.2 simonb
2240 1.86 jdolecek static void
2241 1.86 jdolecek wapbl_deallocation_free(struct wapbl *wl, struct wapbl_dealloc *wd,
2242 1.86 jdolecek bool locked)
2243 1.86 jdolecek {
2244 1.114 riastrad
2245 1.86 jdolecek KASSERT(!locked
2246 1.86 jdolecek || rw_lock_held(&wl->wl_rwlock) || mutex_owned(&wl->wl_mtx));
2247 1.86 jdolecek
2248 1.86 jdolecek if (!locked)
2249 1.86 jdolecek mutex_enter(&wl->wl_mtx);
2250 1.86 jdolecek
2251 1.86 jdolecek TAILQ_REMOVE(&wl->wl_dealloclist, wd, wd_entries);
2252 1.86 jdolecek wl->wl_dealloccnt--;
2253 1.86 jdolecek
2254 1.86 jdolecek if (!locked)
2255 1.86 jdolecek mutex_exit(&wl->wl_mtx);
2256 1.86 jdolecek
2257 1.86 jdolecek pool_put(&wapbl_dealloc_pool, wd);
2258 1.86 jdolecek }
2259 1.86 jdolecek
2260 1.86 jdolecek void
2261 1.86 jdolecek wapbl_unregister_deallocation(struct wapbl *wl, void *cookie)
2262 1.86 jdolecek {
2263 1.114 riastrad
2264 1.86 jdolecek KASSERT(cookie != NULL);
2265 1.86 jdolecek wapbl_deallocation_free(wl, cookie, false);
2266 1.86 jdolecek }
2267 1.86 jdolecek
2268 1.2 simonb /****************************************************************/
2269 1.2 simonb
2270 1.2 simonb static void
2271 1.2 simonb wapbl_inodetrk_init(struct wapbl *wl, u_int size)
2272 1.2 simonb {
2273 1.2 simonb
2274 1.2 simonb wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
2275 1.2 simonb if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
2276 1.2 simonb pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
2277 1.2 simonb "wapblinopl", &pool_allocator_nointr, IPL_NONE);
2278 1.2 simonb }
2279 1.2 simonb }
2280 1.2 simonb
2281 1.2 simonb static void
2282 1.2 simonb wapbl_inodetrk_free(struct wapbl *wl)
2283 1.2 simonb {
2284 1.2 simonb
2285 1.2 simonb /* XXX this KASSERT needs locking/mutex analysis */
2286 1.2 simonb KASSERT(wl->wl_inohashcnt == 0);
2287 1.2 simonb hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
2288 1.112 riastrad membar_release();
2289 1.2 simonb if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
2290 1.112 riastrad membar_acquire();
2291 1.2 simonb pool_destroy(&wapbl_ino_pool);
2292 1.2 simonb }
2293 1.2 simonb }
2294 1.2 simonb
2295 1.2 simonb static struct wapbl_ino *
2296 1.2 simonb wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
2297 1.2 simonb {
2298 1.2 simonb struct wapbl_ino_head *wih;
2299 1.2 simonb struct wapbl_ino *wi;
2300 1.2 simonb
2301 1.2 simonb KASSERT(mutex_owned(&wl->wl_mtx));
2302 1.2 simonb
2303 1.2 simonb wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2304 1.2 simonb LIST_FOREACH(wi, wih, wi_hash) {
2305 1.2 simonb if (ino == wi->wi_ino)
2306 1.2 simonb return wi;
2307 1.2 simonb }
2308 1.2 simonb return 0;
2309 1.2 simonb }
2310 1.2 simonb
2311 1.2 simonb void
2312 1.2 simonb wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2313 1.2 simonb {
2314 1.2 simonb struct wapbl_ino_head *wih;
2315 1.2 simonb struct wapbl_ino *wi;
2316 1.2 simonb
2317 1.2 simonb wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
2318 1.2 simonb
2319 1.2 simonb mutex_enter(&wl->wl_mtx);
2320 1.2 simonb if (wapbl_inodetrk_get(wl, ino) == NULL) {
2321 1.2 simonb wi->wi_ino = ino;
2322 1.2 simonb wi->wi_mode = mode;
2323 1.2 simonb wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2324 1.2 simonb LIST_INSERT_HEAD(wih, wi, wi_hash);
2325 1.2 simonb wl->wl_inohashcnt++;
2326 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_INODE,
2327 1.2 simonb ("wapbl_register_inode: ino=%"PRId64"\n", ino));
2328 1.2 simonb mutex_exit(&wl->wl_mtx);
2329 1.2 simonb } else {
2330 1.2 simonb mutex_exit(&wl->wl_mtx);
2331 1.2 simonb pool_put(&wapbl_ino_pool, wi);
2332 1.2 simonb }
2333 1.2 simonb }
2334 1.2 simonb
2335 1.2 simonb void
2336 1.2 simonb wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2337 1.2 simonb {
2338 1.2 simonb struct wapbl_ino *wi;
2339 1.2 simonb
2340 1.2 simonb mutex_enter(&wl->wl_mtx);
2341 1.2 simonb wi = wapbl_inodetrk_get(wl, ino);
2342 1.2 simonb if (wi) {
2343 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_INODE,
2344 1.2 simonb ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
2345 1.2 simonb KASSERT(wl->wl_inohashcnt > 0);
2346 1.2 simonb wl->wl_inohashcnt--;
2347 1.2 simonb LIST_REMOVE(wi, wi_hash);
2348 1.2 simonb mutex_exit(&wl->wl_mtx);
2349 1.2 simonb
2350 1.2 simonb pool_put(&wapbl_ino_pool, wi);
2351 1.2 simonb } else {
2352 1.2 simonb mutex_exit(&wl->wl_mtx);
2353 1.2 simonb }
2354 1.2 simonb }
2355 1.2 simonb
2356 1.2 simonb /****************************************************************/
2357 1.2 simonb
2358 1.71 riastrad /*
2359 1.71 riastrad * wapbl_transaction_inodes_len(wl)
2360 1.71 riastrad *
2361 1.71 riastrad * Calculate the number of bytes required for inode registration
2362 1.71 riastrad * log records in wl.
2363 1.71 riastrad */
2364 1.30 uebayasi static inline size_t
2365 1.2 simonb wapbl_transaction_inodes_len(struct wapbl *wl)
2366 1.2 simonb {
2367 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2368 1.2 simonb int iph;
2369 1.2 simonb
2370 1.2 simonb /* Calculate number of inodes described in an inodelist header */
2371 1.2 simonb iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2372 1.2 simonb sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2373 1.2 simonb
2374 1.2 simonb KASSERT(iph > 0);
2375 1.2 simonb
2376 1.39 christos return MAX(1, howmany(wl->wl_inohashcnt, iph)) * blocklen;
2377 1.2 simonb }
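
/*
 * Worked example (illustrative, with assumed sizes): if the log device
 * block size is 512 bytes (wl_log_dev_bshift == 9) and, say, the
 * inodelist header takes 16 bytes with 8 bytes per wc_inodes[] entry,
 * then iph = (512 - 16) / 8 = 62 inodes per block.  100 registered
 * inodes then need howmany(100, 62) = 2 blocks, i.e. 1024 bytes, and
 * the MAX(1, ...) reserves one block even when no inodes are
 * registered, matching the (possibly empty) record that
 * wapbl_write_inodes() always emits.
 */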
2378 1.2 simonb
2379 1.2 simonb
2380 1.71 riastrad /*
2381 1.71 riastrad * wapbl_transaction_len(wl)
2382 1.71 riastrad *
2383 1.71 riastrad * Calculate number of bytes required for all log records in wl.
2384 1.71 riastrad */
2385 1.2 simonb static size_t
2386 1.2 simonb wapbl_transaction_len(struct wapbl *wl)
2387 1.2 simonb {
2388 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2389 1.2 simonb size_t len;
2390 1.2 simonb
2391 1.80 jdolecek /* Space for data, plus blocklist, revocation, and inode records */
2392 1.2 simonb len = wl->wl_bcount;
2393 1.79 jdolecek len += howmany(wl->wl_bufcount, wl->wl_brperjblock) * blocklen;
2394 1.79 jdolecek len += howmany(wl->wl_dealloccnt, wl->wl_brperjblock) * blocklen;
2395 1.2 simonb len += wapbl_transaction_inodes_len(wl);
2396 1.2 simonb
2397 1.2 simonb return len;
2398 1.2 simonb }
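
/*
 * Worked example (illustrative numbers only): with 512-byte log
 * blocks, suppose wl_brperjblock == 62 block records per journal
 * block, wl_bufcount == 100 buffers totalling wl_bcount == 204800
 * bytes, wl_dealloccnt == 10 and no registered inodes.  Then the
 * function returns 204800 bytes of data plus howmany(100, 62) = 2
 * blocklist headers, howmany(10, 62) = 1 revocation header and 1
 * (empty) inode record, i.e. 204800 + 4 * 512 = 206848 bytes.
 */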
2399 1.2 simonb
2400 1.2 simonb /*
2401 1.71 riastrad * wapbl_cache_sync(wl, msg)
2402 1.71 riastrad *
2403 1.71 riastrad * Issue DIOCCACHESYNC to wl->wl_devvp.
2404 1.71 riastrad *
2405 1.71 riastrad * If sysctl(vfs.wapbl.verbose_commit) >= 2, print a message
2406 1.71 riastrad * including msg about the duration of the cache sync.
2407 1.48 yamt */
2408 1.48 yamt static int
2409 1.48 yamt wapbl_cache_sync(struct wapbl *wl, const char *msg)
2410 1.48 yamt {
2411 1.48 yamt const bool verbose = wapbl_verbose_commit >= 2;
2412 1.48 yamt struct bintime start_time;
2413 1.48 yamt int force = 1;
2414 1.48 yamt int error;
2415 1.48 yamt
2416 1.101 jdolecek /* Skip full cache sync if disabled */
2417 1.101 jdolecek if (!wapbl_flush_disk_cache) {
2418 1.48 yamt return 0;
2419 1.48 yamt }
2420 1.48 yamt if (verbose) {
2421 1.48 yamt bintime(&start_time);
2422 1.48 yamt }
2423 1.48 yamt error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force,
2424 1.48 yamt FWRITE, FSCRED);
2425 1.48 yamt if (error) {
2426 1.48 yamt WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2427 1.76 riastrad ("wapbl_cache_sync: DIOCCACHESYNC on dev 0x%jx "
2428 1.114 riastrad "returned %d\n", (uintmax_t)wl->wl_devvp->v_rdev,
2429 1.114 riastrad error));
2430 1.48 yamt }
2431 1.48 yamt if (verbose) {
2432 1.48 yamt struct bintime d;
2433 1.48 yamt struct timespec ts;
2434 1.48 yamt
2435 1.48 yamt bintime(&d);
2436 1.48 yamt bintime_sub(&d, &start_time);
2437 1.48 yamt bintime2timespec(&d, &ts);
2438 1.48 yamt printf("wapbl_cache_sync: %s: dev 0x%jx %ju.%09lu\n",
2439 1.48 yamt msg, (uintmax_t)wl->wl_devvp->v_rdev,
2440 1.48 yamt (uintmax_t)ts.tv_sec, ts.tv_nsec);
2441 1.48 yamt }
2442 1.87 jdolecek
2443 1.87 jdolecek wl->wl_ev_cacheflush.ev_count++;
2444 1.87 jdolecek
2445 1.48 yamt return error;
2446 1.48 yamt }
2447 1.48 yamt
2448 1.48 yamt /*
2449 1.71 riastrad * wapbl_write_commit(wl, head, tail)
2450 1.71 riastrad *
2451 1.71 riastrad * Issue a disk cache sync to wait for all pending writes to the
2452 1.71 riastrad * log to complete, and then synchronously commit the current
2453 1.71 riastrad * circular queue head and tail to the log, in the next of two
2454 1.71 riastrad * locations for commit headers on disk.
2455 1.2 simonb *
2456 1.71 riastrad * Increment the generation number. If the generation number
2457 1.71 riastrad * rolls over to zero, then a subsequent commit would appear to
2458 1.71 riastrad * have an older generation than this one -- in that case, issue a
2459 1.71 riastrad * duplicate commit to avoid this.
2460 1.71 riastrad *
2461 1.71 riastrad * => Caller must have exclusive access to wl, either by holding
2462 1.71 riastrad * wl->wl_rwlock for writer, or by calling from wapbl_start before
2463 1.71 riastrad * anyone else has seen wl.
2464 1.2 simonb */
2465 1.2 simonb static int
2466 1.2 simonb wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
2467 1.2 simonb {
2468 1.2 simonb struct wapbl_wc_header *wc = wl->wl_wc_header;
2469 1.2 simonb struct timespec ts;
2470 1.2 simonb int error;
2471 1.34 mlelstv daddr_t pbn;
2472 1.2 simonb
2473 1.95 jdolecek error = wapbl_buffered_flush(wl, true);
2474 1.54 hannken if (error)
2475 1.54 hannken return error;
2476 1.49 yamt /*
2477 1.101 jdolecek * Flush disk cache to ensure that blocks we've written are actually
2478 1.49 yamt * written to the stable storage before the commit header.
2479 1.101 jdolecek * This flushes to disk not only the journal blocks, but also all
2480 1.101 jdolecek * metadata blocks written asynchronously since the previous commit.
2481 1.49 yamt *
2482 1.49 yamt * XXX Calc checksum here, instead we do this for now
2483 1.49 yamt */
2484 1.48 yamt wapbl_cache_sync(wl, "1");
2485 1.2 simonb
2486 1.2 simonb wc->wc_head = head;
2487 1.2 simonb wc->wc_tail = tail;
2488 1.2 simonb wc->wc_checksum = 0;
2489 1.2 simonb wc->wc_version = 1;
2490 1.2 simonb getnanotime(&ts);
2491 1.17 yamt wc->wc_time = ts.tv_sec;
2492 1.2 simonb wc->wc_timensec = ts.tv_nsec;
2493 1.2 simonb
2494 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2495 1.2 simonb ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n",
2496 1.114 riastrad (intmax_t)head, (intmax_t)tail));
2497 1.2 simonb
2498 1.2 simonb /*
2499 1.49 yamt * write the commit header.
2500 1.49 yamt *
2501 1.2 simonb * XXX if generation will rollover, then first zero
2502 1.2 simonb * over second commit header before trying to write both headers.
2503 1.2 simonb */
2504 1.2 simonb
2505 1.34 mlelstv pbn = wl->wl_logpbn + (wc->wc_generation % 2);
2506 1.34 mlelstv #ifdef _KERNEL
2507 1.34 mlelstv pbn = btodb(pbn << wc->wc_log_dev_bshift);
2508 1.34 mlelstv #endif
2509 1.114 riastrad error = wapbl_buffered_write(wc, wc->wc_len, wl, pbn,
2510 1.114 riastrad WAPBL_JFLAGS(wl));
2511 1.54 hannken if (error)
2512 1.54 hannken return error;
2513 1.95 jdolecek error = wapbl_buffered_flush(wl, true);
2514 1.2 simonb if (error)
2515 1.2 simonb return error;
2516 1.2 simonb
2517 1.49 yamt /*
2518 1.101 jdolecek * Flush disk cache to ensure that the commit header is actually
2519 1.101 jdolecek * written before the metadata blocks. The commit block is written
2520 1.101 jdolecek * with FUA when enabled; in that case this flush is not needed.
2521 1.49 yamt */
2522 1.101 jdolecek if (!WAPBL_USE_FUA(wl))
2523 1.101 jdolecek wapbl_cache_sync(wl, "2");
2524 1.2 simonb
2525 1.2 simonb /*
2526 1.2 simonb * If the generation number was zero, write it out a second time.
2527 1.2 simonb * This handles initialization and generation number rollover
2528 1.2 simonb */
2529 1.2 simonb if (wc->wc_generation++ == 0) {
2530 1.2 simonb error = wapbl_write_commit(wl, head, tail);
2531 1.2 simonb /*
2532 1.2 simonb * This panic could be removed if we did the zeroing
2533 1.2 simonb * mentioned above and made certain to roll back the
2534 1.2 simonb * generation number on failure.
2535 1.2 simonb */
2536 1.114 riastrad if (error) {
2537 1.2 simonb panic("wapbl_write_commit: error writing duplicate "
2538 1.114 riastrad "log header: %d", error);
2539 1.114 riastrad }
2540 1.2 simonb }
2541 1.87 jdolecek
2542 1.87 jdolecek wl->wl_ev_commit.ev_count++;
2543 1.87 jdolecek
2544 1.2 simonb return 0;
2545 1.2 simonb }
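
/*
 * Illustrative note on the header placement above: the two commit
 * header slots alternate with the generation number, so an even
 * generation lands in the first slot (wl_logpbn + 0) and an odd one
 * in the second (wl_logpbn + 1), each converted to a device block
 * address with btodb().  Replay uses whichever slot carries the newer
 * generation, which is why generation 0 (initialization or rollover)
 * is written out a second time above.
 */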
2546 1.2 simonb
2547 1.71 riastrad /*
2548 1.71 riastrad * wapbl_write_blocks(wl, offp)
2549 1.71 riastrad *
2550 1.71 riastrad * Write all pending physical blocks in the current transaction
2551 1.71 riastrad * from wapbl_add_buf to the log on disk, adding to the circular
2552 1.71 riastrad * queue head at byte offset *offp, and returning the new head's
2553 1.71 riastrad * byte offset in *offp.
2554 1.71 riastrad */
2555 1.2 simonb static int
2556 1.2 simonb wapbl_write_blocks(struct wapbl *wl, off_t *offp)
2557 1.2 simonb {
2558 1.2 simonb struct wapbl_wc_blocklist *wc =
2559 1.2 simonb (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2560 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2561 1.2 simonb struct buf *bp;
2562 1.2 simonb off_t off = *offp;
2563 1.2 simonb int error;
2564 1.7 joerg size_t padding;
2565 1.2 simonb
2566 1.2 simonb KASSERT(rw_write_held(&wl->wl_rwlock));
2567 1.2 simonb
2568 1.94 jdolecek bp = TAILQ_FIRST(&wl->wl_bufs);
2569 1.2 simonb
2570 1.2 simonb while (bp) {
2571 1.2 simonb int cnt;
2572 1.2 simonb struct buf *obp = bp;
2573 1.2 simonb
2574 1.2 simonb KASSERT(bp->b_flags & B_LOCKED);
2575 1.2 simonb
2576 1.2 simonb wc->wc_type = WAPBL_WC_BLOCKS;
2577 1.2 simonb wc->wc_len = blocklen;
2578 1.2 simonb wc->wc_blkcount = 0;
2579 1.109 chs wc->wc_unused = 0;
2580 1.114 riastrad while (bp && wc->wc_blkcount < wl->wl_brperjblock) {
2581 1.2 simonb /*
2582 1.2 simonb * Make sure all the physical block numbers are up to
2583 1.2 simonb * date. If this is not always true on a given
2584 1.2 simonb * filesystem, then VOP_BMAP must be called. We
2585 1.2 simonb * could call VOP_BMAP here, or else in the filesystem
2586 1.2 simonb * specific flush callback, although neither of those
2587 1.2 simonb * solutions allow us to take the vnode lock. If a
2588 1.2 simonb * filesystem requires that we must take the vnode lock
2589 1.2 simonb * to call VOP_BMAP, then we can probably do it in
2590 1.2 simonb * bwrite when the vnode lock should already be held
2591 1.2 simonb * by the invoking code.
2592 1.2 simonb */
2593 1.114 riastrad KASSERT(bp->b_vp->v_type == VBLK ||
2594 1.114 riastrad bp->b_blkno != bp->b_lblkno);
2595 1.2 simonb KASSERT(bp->b_blkno > 0);
2596 1.2 simonb
2597 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
2598 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
2599 1.2 simonb wc->wc_len += bp->b_bcount;
2600 1.2 simonb wc->wc_blkcount++;
2601 1.94 jdolecek bp = TAILQ_NEXT(bp, b_wapbllist);
2602 1.2 simonb }
2603 1.7 joerg if (wc->wc_len % blocklen != 0) {
2604 1.7 joerg padding = blocklen - wc->wc_len % blocklen;
2605 1.7 joerg wc->wc_len += padding;
2606 1.7 joerg } else {
2607 1.7 joerg padding = 0;
2608 1.7 joerg }
2609 1.7 joerg
2610 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2611 1.114 riastrad ("wapbl_write_blocks:"
2612 1.114 riastrad " len = %u (padding %zu) off = %"PRIdMAX"\n",
2613 1.114 riastrad wc->wc_len, padding, (intmax_t)off));
2614 1.2 simonb
2615 1.2 simonb error = wapbl_circ_write(wl, wc, blocklen, &off);
2616 1.2 simonb if (error)
2617 1.2 simonb return error;
2618 1.2 simonb bp = obp;
2619 1.2 simonb cnt = 0;
2620 1.114 riastrad while (bp && cnt++ < wl->wl_brperjblock) {
2621 1.2 simonb error = wapbl_circ_write(wl, bp->b_data,
2622 1.2 simonb bp->b_bcount, &off);
2623 1.2 simonb if (error)
2624 1.2 simonb return error;
2625 1.94 jdolecek bp = TAILQ_NEXT(bp, b_wapbllist);
2626 1.2 simonb }
2627 1.7 joerg if (padding) {
2628 1.7 joerg void *zero;
2629 1.91 riastrad
2630 1.51 para zero = wapbl_alloc(padding);
2631 1.7 joerg memset(zero, 0, padding);
2632 1.7 joerg error = wapbl_circ_write(wl, zero, padding, &off);
2633 1.18 yamt wapbl_free(zero, padding);
2634 1.7 joerg if (error)
2635 1.7 joerg return error;
2636 1.7 joerg }
2637 1.2 simonb }
2638 1.2 simonb *offp = off;
2639 1.2 simonb return 0;
2640 1.2 simonb }
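
/*
 * Worked example of the padding above (illustrative numbers only):
 * with 512-byte log blocks, a chunk whose header plus buffer data
 * comes to wc_len = 512 + 3000 = 3512 bytes ends 440 bytes into its
 * seventh block, so padding = 512 - 440 = 72 and the chunk occupies
 * exactly 3584 bytes (7 blocks) in the log.
 */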
2641 1.2 simonb
2642 1.71 riastrad /*
2643 1.71 riastrad * wapbl_write_revocations(wl, offp)
2644 1.71 riastrad *
2645 1.71 riastrad * Write all pending deallocations in the current transaction from
2646 1.71 riastrad * wapbl_register_deallocation to the log on disk, adding to the
2647 1.71 riastrad * circular queue's head at byte offset *offp, and returning the
2648 1.71 riastrad * new head's byte offset in *offp.
2649 1.71 riastrad */
2650 1.2 simonb static int
2651 1.2 simonb wapbl_write_revocations(struct wapbl *wl, off_t *offp)
2652 1.2 simonb {
2653 1.2 simonb struct wapbl_wc_blocklist *wc =
2654 1.2 simonb (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2655 1.81 jdolecek struct wapbl_dealloc *wd, *lwd;
2656 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2657 1.2 simonb off_t off = *offp;
2658 1.2 simonb int error;
2659 1.2 simonb
2660 1.89 riastrad KASSERT(rw_write_held(&wl->wl_rwlock));
2661 1.89 riastrad
2662 1.2 simonb if (wl->wl_dealloccnt == 0)
2663 1.2 simonb return 0;
2664 1.2 simonb
2665 1.86 jdolecek while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2666 1.2 simonb wc->wc_type = WAPBL_WC_REVOCATIONS;
2667 1.2 simonb wc->wc_len = blocklen;
2668 1.2 simonb wc->wc_blkcount = 0;
2669 1.109 chs wc->wc_unused = 0;
2670 1.114 riastrad while (wd && wc->wc_blkcount < wl->wl_brperjblock) {
2671 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2672 1.81 jdolecek wd->wd_blkno;
2673 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2674 1.81 jdolecek wd->wd_len;
2675 1.2 simonb wc->wc_blkcount++;
2676 1.81 jdolecek
2677 1.86 jdolecek wd = TAILQ_NEXT(wd, wd_entries);
2678 1.2 simonb }
2679 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2680 1.2 simonb ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2681 1.114 riastrad wc->wc_len, (intmax_t)off));
2682 1.2 simonb error = wapbl_circ_write(wl, wc, blocklen, &off);
2683 1.2 simonb if (error)
2684 1.2 simonb return error;
2685 1.81 jdolecek
2686 1.81 jdolecek /* free all successfully written deallocs */
2687 1.81 jdolecek lwd = wd;
2688 1.86 jdolecek while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2689 1.83 jdolecek if (wd == lwd)
2690 1.83 jdolecek break;
2691 1.86 jdolecek wapbl_deallocation_free(wl, wd, true);
2692 1.81 jdolecek }
2693 1.2 simonb }
2694 1.2 simonb *offp = off;
2695 1.2 simonb return 0;
2696 1.2 simonb }
2697 1.2 simonb
2698 1.71 riastrad /*
2699 1.71 riastrad * wapbl_write_inodes(wl, offp)
2700 1.71 riastrad *
2701 1.71 riastrad * Write all pending inode allocations in the current transaction
2702 1.71 riastrad * from wapbl_register_inode to the log on disk, adding to the
2703 1.71 riastrad * circular queue's head at byte offset *offp and returning the
2704 1.71 riastrad * new head's byte offset in *offp.
2705 1.71 riastrad */
2706 1.2 simonb static int
2707 1.2 simonb wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2708 1.2 simonb {
2709 1.2 simonb struct wapbl_wc_inodelist *wc =
2710 1.2 simonb (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2711 1.2 simonb int i;
2712 1.14 joerg int blocklen = 1 << wl->wl_log_dev_bshift;
2713 1.2 simonb off_t off = *offp;
2714 1.2 simonb int error;
2715 1.2 simonb
2716 1.2 simonb struct wapbl_ino_head *wih;
2717 1.2 simonb struct wapbl_ino *wi;
2718 1.2 simonb int iph;
2719 1.2 simonb
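	/* How many inode records fit in one log block after the header. */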
2720 1.2 simonb iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2721 1.2 simonb sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2722 1.2 simonb
2723 1.2 simonb i = 0;
2724 1.2 simonb wih = &wl->wl_inohash[0];
2725 1.2 simonb wi = 0;
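	/*
	 * Walk every chain of the inode hash, emitting as many
	 * WAPBL_WC_INODES records as needed; only the first record has
	 * wc_clear set, so replay resets its inode list exactly once.
	 */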
2726 1.2 simonb do {
2727 1.2 simonb wc->wc_type = WAPBL_WC_INODES;
2728 1.2 simonb wc->wc_len = blocklen;
2729 1.2 simonb wc->wc_inocnt = 0;
2730 1.2 simonb wc->wc_clear = (i == 0);
2731 1.114 riastrad while (i < wl->wl_inohashcnt && wc->wc_inocnt < iph) {
2732 1.2 simonb while (!wi) {
2733 1.2 simonb KASSERT((wih - &wl->wl_inohash[0])
2734 1.2 simonb <= wl->wl_inohashmask);
2735 1.2 simonb wi = LIST_FIRST(wih++);
2736 1.2 simonb }
2737 1.2 simonb wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2738 1.2 simonb wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2739 1.2 simonb wc->wc_inocnt++;
2740 1.2 simonb i++;
2741 1.2 simonb wi = LIST_NEXT(wi, wi_hash);
2742 1.2 simonb }
2743 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2744 1.2 simonb ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2745 1.114 riastrad wc->wc_len, (intmax_t)off));
2746 1.2 simonb error = wapbl_circ_write(wl, wc, blocklen, &off);
2747 1.2 simonb if (error)
2748 1.2 simonb return error;
2749 1.2 simonb } while (i < wl->wl_inohashcnt);
2750 1.91 riastrad
2751 1.2 simonb *offp = off;
2752 1.2 simonb return 0;
2753 1.2 simonb }
2754 1.2 simonb
2755 1.2 simonb #endif /* _KERNEL */
2756 1.2 simonb
2757 1.2 simonb /****************************************************************/
2758 1.2 simonb
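/*
 * Replay state: each wapbl_blk maps a filesystem device block number
 * to the log offset of its most recent copy.  wapbl_blkhash_ins()
 * overwrites wb_off for a block that is already present, so replay
 * writes only the newest logged version of each block.
 */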
2759 1.2 simonb struct wapbl_blk {
2760 1.2 simonb LIST_ENTRY(wapbl_blk) wb_hash;
2761 1.2 simonb daddr_t wb_blk;
2762 1.2 simonb off_t wb_off; /* Offset of this block in the log */
2763 1.2 simonb };
2764 1.2 simonb #define WAPBL_BLKPOOL_MIN 83
2765 1.2 simonb
2766 1.2 simonb static void
2767 1.2 simonb wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2768 1.2 simonb {
2769 1.114 riastrad
2770 1.2 simonb if (size < WAPBL_BLKPOOL_MIN)
2771 1.2 simonb size = WAPBL_BLKPOOL_MIN;
2772 1.2 simonb KASSERT(wr->wr_blkhash == 0);
2773 1.2 simonb #ifdef _KERNEL
2774 1.2 simonb wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2775 1.2 simonb #else /* ! _KERNEL */
2776 1.2 simonb /* Manually implement hashinit */
2777 1.2 simonb {
2778 1.25 lukem unsigned long i, hashsize;
2779 1.114 riastrad
2780 1.2 simonb for (hashsize = 1; hashsize < size; hashsize <<= 1)
2781 1.2 simonb continue;
2782 1.114 riastrad wr->wr_blkhash = wapbl_alloc(hashsize *
2783 1.114 riastrad sizeof(*wr->wr_blkhash));
2784 1.37 drochner for (i = 0; i < hashsize; i++)
2785 1.2 simonb LIST_INIT(&wr->wr_blkhash[i]);
2786 1.2 simonb wr->wr_blkhashmask = hashsize - 1;
2787 1.2 simonb }
2788 1.2 simonb #endif /* ! _KERNEL */
2789 1.2 simonb }
2790 1.2 simonb
2791 1.2 simonb static void
2792 1.2 simonb wapbl_blkhash_free(struct wapbl_replay *wr)
2793 1.2 simonb {
2794 1.114 riastrad
2795 1.2 simonb KASSERT(wr->wr_blkhashcnt == 0);
2796 1.2 simonb #ifdef _KERNEL
2797 1.2 simonb hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2798 1.2 simonb #else /* ! _KERNEL */
2799 1.18 yamt wapbl_free(wr->wr_blkhash,
2800 1.18 yamt (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
2801 1.2 simonb #endif /* ! _KERNEL */
2802 1.2 simonb }
2803 1.2 simonb
2804 1.2 simonb static struct wapbl_blk *
2805 1.2 simonb wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2806 1.2 simonb {
2807 1.2 simonb struct wapbl_blk_head *wbh;
2808 1.2 simonb struct wapbl_blk *wb;
2809 1.114 riastrad
2810 1.2 simonb wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2811 1.2 simonb LIST_FOREACH(wb, wbh, wb_hash) {
2812 1.2 simonb if (blk == wb->wb_blk)
2813 1.2 simonb return wb;
2814 1.2 simonb }
2815 1.2 simonb return 0;
2816 1.2 simonb }
2817 1.2 simonb
2818 1.2 simonb static void
2819 1.2 simonb wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2820 1.2 simonb {
2821 1.2 simonb struct wapbl_blk_head *wbh;
2822 1.2 simonb struct wapbl_blk *wb;
2823 1.114 riastrad
2824 1.2 simonb wb = wapbl_blkhash_get(wr, blk);
2825 1.2 simonb if (wb) {
2826 1.2 simonb KASSERT(wb->wb_blk == blk);
2827 1.2 simonb wb->wb_off = off;
2828 1.2 simonb } else {
2829 1.51 para wb = wapbl_alloc(sizeof(*wb));
2830 1.2 simonb wb->wb_blk = blk;
2831 1.2 simonb wb->wb_off = off;
2832 1.2 simonb wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2833 1.2 simonb LIST_INSERT_HEAD(wbh, wb, wb_hash);
2834 1.2 simonb wr->wr_blkhashcnt++;
2835 1.2 simonb }
2836 1.2 simonb }
2837 1.2 simonb
2838 1.2 simonb static void
2839 1.2 simonb wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2840 1.2 simonb {
2841 1.2 simonb struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2842 1.114 riastrad
2843 1.2 simonb if (wb) {
2844 1.2 simonb KASSERT(wr->wr_blkhashcnt > 0);
2845 1.2 simonb wr->wr_blkhashcnt--;
2846 1.2 simonb LIST_REMOVE(wb, wb_hash);
2847 1.18 yamt wapbl_free(wb, sizeof(*wb));
2848 1.2 simonb }
2849 1.2 simonb }
2850 1.2 simonb
2851 1.2 simonb static void
2852 1.2 simonb wapbl_blkhash_clear(struct wapbl_replay *wr)
2853 1.2 simonb {
2854 1.25 lukem unsigned long i;
2855 1.114 riastrad
2856 1.2 simonb for (i = 0; i <= wr->wr_blkhashmask; i++) {
2857 1.2 simonb struct wapbl_blk *wb;
2858 1.2 simonb
2859 1.2 simonb while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2860 1.2 simonb KASSERT(wr->wr_blkhashcnt > 0);
2861 1.2 simonb wr->wr_blkhashcnt--;
2862 1.2 simonb LIST_REMOVE(wb, wb_hash);
2863 1.18 yamt wapbl_free(wb, sizeof(*wb));
2864 1.2 simonb }
2865 1.2 simonb }
2866 1.2 simonb KASSERT(wr->wr_blkhashcnt == 0);
2867 1.2 simonb }
2868 1.2 simonb
2869 1.2 simonb /****************************************************************/
2870 1.2 simonb
2871 1.71 riastrad /*
2872 1.71 riastrad * wapbl_circ_read(wr, data, len, offp)
2873 1.71 riastrad *
2874 1.71 riastrad * Read len bytes into data from the circular queue of wr,
2875 1.71 riastrad * starting at the linear byte offset *offp, and returning the new
2876 1.71 riastrad * linear byte offset in *offp.
2877 1.71 riastrad *
2878 1.71 riastrad * If the starting linear byte offset precedes wr->wr_circ_off,
2879 1.71 riastrad * the read instead begins at wr->wr_circ_off. XXX WTF? This
2880 1.71 riastrad * should be a KASSERT, not a conditional.
2881 1.71 riastrad */
2882 1.2 simonb static int
2883 1.2 simonb wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2884 1.2 simonb {
2885 1.2 simonb size_t slen;
2886 1.2 simonb off_t off = *offp;
2887 1.2 simonb int error;
2888 1.34 mlelstv daddr_t pbn;
2889 1.2 simonb
2890 1.114 riastrad KASSERT(((len >> wr->wr_log_dev_bshift) << wr->wr_log_dev_bshift) ==
2891 1.114 riastrad len);
2892 1.34 mlelstv
2893 1.14 joerg if (off < wr->wr_circ_off)
2894 1.14 joerg off = wr->wr_circ_off;
2895 1.14 joerg slen = wr->wr_circ_off + wr->wr_circ_size - off;
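	/*
	 * If the request runs past the end of the circular area, split
	 * it: read up to the end first, then wrap around to
	 * wr_circ_off for the remainder.
	 */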
2896 1.2 simonb if (slen < len) {
2897 1.34 mlelstv pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2898 1.34 mlelstv #ifdef _KERNEL
2899 1.34 mlelstv pbn = btodb(pbn << wr->wr_log_dev_bshift);
2900 1.34 mlelstv #endif
2901 1.34 mlelstv error = wapbl_read(data, slen, wr->wr_devvp, pbn);
2902 1.2 simonb if (error)
2903 1.2 simonb return error;
2904 1.2 simonb data = (uint8_t *)data + slen;
2905 1.2 simonb len -= slen;
2906 1.14 joerg off = wr->wr_circ_off;
2907 1.2 simonb }
2908 1.34 mlelstv pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2909 1.34 mlelstv #ifdef _KERNEL
2910 1.34 mlelstv pbn = btodb(pbn << wr->wr_log_dev_bshift);
2911 1.34 mlelstv #endif
2912 1.34 mlelstv error = wapbl_read(data, len, wr->wr_devvp, pbn);
2913 1.2 simonb if (error)
2914 1.2 simonb return error;
2915 1.2 simonb off += len;
2916 1.14 joerg if (off >= wr->wr_circ_off + wr->wr_circ_size)
2917 1.14 joerg off = wr->wr_circ_off;
2918 1.2 simonb *offp = off;
2919 1.2 simonb return 0;
2920 1.2 simonb }
2921 1.2 simonb
2922 1.71 riastrad /*
2923 1.71 riastrad * wapbl_circ_advance(wr, len, offp)
2924 1.71 riastrad *
2925 1.71 riastrad * Compute the linear byte offset of the circular queue of wr that
2926 1.71 riastrad * is len bytes past *offp, and store it in *offp.
2927 1.71 riastrad *
2928 1.71 riastrad * This is as if wapbl_circ_read, but without actually reading
2929 1.71 riastrad * anything.
2930 1.71 riastrad *
2931 1.71 riastrad * If the starting linear byte offset precedes wr->wr_circ_off, it
2932 1.71 riastrad * is taken to be wr->wr_circ_off instead. XXX WTF? This should
2933 1.71 riastrad * be a KASSERT, not a conditional.
2934 1.71 riastrad */
2935 1.2 simonb static void
2936 1.2 simonb wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2937 1.2 simonb {
2938 1.2 simonb size_t slen;
2939 1.2 simonb off_t off = *offp;
2940 1.2 simonb
2941 1.114 riastrad KASSERT(((len >> wr->wr_log_dev_bshift) << wr->wr_log_dev_bshift) ==
2942 1.114 riastrad len);
2943 1.2 simonb
2944 1.14 joerg if (off < wr->wr_circ_off)
2945 1.14 joerg off = wr->wr_circ_off;
2946 1.14 joerg slen = wr->wr_circ_off + wr->wr_circ_size - off;
2947 1.2 simonb if (slen < len) {
2948 1.2 simonb len -= slen;
2949 1.14 joerg off = wr->wr_circ_off;
2950 1.2 simonb }
2951 1.2 simonb off += len;
2952 1.14 joerg if (off >= wr->wr_circ_off + wr->wr_circ_size)
2953 1.14 joerg off = wr->wr_circ_off;
2954 1.2 simonb *offp = off;
2955 1.2 simonb }
2956 1.2 simonb
2957 1.2 simonb /****************************************************************/
2958 1.2 simonb
2959 1.2 simonb int
2960 1.2 simonb wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2961 1.114 riastrad daddr_t off, size_t count, size_t blksize)
2962 1.2 simonb {
2963 1.2 simonb struct wapbl_replay *wr;
2964 1.2 simonb int error;
2965 1.2 simonb struct vnode *devvp;
2966 1.2 simonb daddr_t logpbn;
2967 1.2 simonb uint8_t *scratch;
2968 1.2 simonb struct wapbl_wc_header *wch;
2969 1.2 simonb struct wapbl_wc_header *wch2;
2970 1.2 simonb /* Use this until we read the actual log header */
2971 1.31 mlelstv int log_dev_bshift = ilog2(blksize);
2972 1.2 simonb size_t used;
2973 1.34 mlelstv daddr_t pbn;
2974 1.2 simonb
2975 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2976 1.114 riastrad ("wapbl_replay_start: vp=%p off=%"PRId64" count=%zu blksize=%zu\n",
2977 1.114 riastrad vp, off, count, blksize));
2978 1.2 simonb
2979 1.2 simonb if (off < 0)
2980 1.116 riastrad return SET_ERROR(EINVAL);
2981 1.2 simonb
2982 1.2 simonb if (blksize < DEV_BSIZE)
2983 1.116 riastrad return SET_ERROR(EINVAL);
2984 1.2 simonb if (blksize % DEV_BSIZE)
2985 1.116 riastrad return SET_ERROR(EINVAL);
2986 1.2 simonb
2987 1.2 simonb #ifdef _KERNEL
2988 1.2 simonb #if 0
2989 1.2 simonb /* XXX vp->v_size isn't reliably set for VBLK devices,
2990 1.2 simonb * especially root. However, we might still want to verify
2991 1.2 simonb * that the full load is readable */
2992 1.2 simonb if ((off + count) * blksize > vp->v_size)
2993 1.116 riastrad return SET_ERROR(EINVAL);
2994 1.2 simonb #endif
2995 1.2 simonb if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2996 1.2 simonb return error;
2997 1.2 simonb }
2998 1.2 simonb #else /* ! _KERNEL */
2999 1.2 simonb devvp = vp;
3000 1.2 simonb logpbn = off;
3001 1.2 simonb #endif /* ! _KERNEL */
3002 1.2 simonb
3003 1.51 para scratch = wapbl_alloc(MAXBSIZE);
3004 1.2 simonb
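	/*
	 * The first two log blocks hold two copies of the commit
	 * header; read both and use whichever carries the higher
	 * generation number.
	 */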
3005 1.34 mlelstv pbn = logpbn;
3006 1.34 mlelstv #ifdef _KERNEL
3007 1.34 mlelstv pbn = btodb(pbn << log_dev_bshift);
3008 1.34 mlelstv #endif
3009 1.34 mlelstv error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, pbn);
3010 1.2 simonb if (error)
3011 1.2 simonb goto errout;
3012 1.2 simonb
3013 1.2 simonb wch = (struct wapbl_wc_header *)scratch;
3014 1.2 simonb wch2 =
3015 1.2 simonb (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
3016 1.2 simonb /* XXX verify checksums and magic numbers */
3017 1.2 simonb if (wch->wc_type != WAPBL_WC_HEADER) {
3018 1.2 simonb printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
3019 1.116 riastrad error = SET_ERROR(EFTYPE);
3020 1.2 simonb goto errout;
3021 1.2 simonb }
3022 1.2 simonb
3023 1.2 simonb if (wch2->wc_generation > wch->wc_generation)
3024 1.2 simonb wch = wch2;
3025 1.2 simonb
3026 1.2 simonb wr = wapbl_calloc(1, sizeof(*wr));
3027 1.2 simonb
3028 1.2 simonb wr->wr_logvp = vp;
3029 1.2 simonb wr->wr_devvp = devvp;
3030 1.2 simonb wr->wr_logpbn = logpbn;
3031 1.2 simonb
3032 1.2 simonb wr->wr_scratch = scratch;
3033 1.2 simonb
3034 1.14 joerg wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
3035 1.14 joerg wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
3036 1.14 joerg wr->wr_circ_off = wch->wc_circ_off;
3037 1.14 joerg wr->wr_circ_size = wch->wc_circ_size;
3038 1.14 joerg wr->wr_generation = wch->wc_generation;
3039 1.2 simonb
3040 1.2 simonb used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
3041 1.2 simonb
3042 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
3043 1.2 simonb ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
3044 1.114 riastrad " len=%"PRId64" used=%zu\n",
3045 1.114 riastrad wch->wc_head, wch->wc_tail, wch->wc_circ_off,
3046 1.114 riastrad wch->wc_circ_size, used));
3047 1.2 simonb
3048 1.2 simonb wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
3049 1.11 joerg
3050 1.14 joerg error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
3051 1.2 simonb if (error) {
3052 1.2 simonb wapbl_replay_stop(wr);
3053 1.2 simonb wapbl_replay_free(wr);
3054 1.2 simonb return error;
3055 1.2 simonb }
3056 1.2 simonb
3057 1.2 simonb *wrp = wr;
3058 1.2 simonb return 0;
3059 1.2 simonb
3060 1.114 riastrad errout:
3061 1.18 yamt wapbl_free(scratch, MAXBSIZE);
3062 1.2 simonb return error;
3063 1.2 simonb }
3064 1.2 simonb
3065 1.2 simonb void
3066 1.2 simonb wapbl_replay_stop(struct wapbl_replay *wr)
3067 1.2 simonb {
3068 1.2 simonb
3069 1.4 joerg if (!wapbl_replay_isopen(wr))
3070 1.4 joerg return;
3071 1.4 joerg
3072 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
3073 1.2 simonb
3074 1.18 yamt wapbl_free(wr->wr_scratch, MAXBSIZE);
3075 1.18 yamt wr->wr_scratch = NULL;
3076 1.2 simonb
3077 1.18 yamt wr->wr_logvp = NULL;
3078 1.2 simonb
3079 1.2 simonb wapbl_blkhash_clear(wr);
3080 1.2 simonb wapbl_blkhash_free(wr);
3081 1.2 simonb }
3082 1.2 simonb
3083 1.2 simonb void
3084 1.2 simonb wapbl_replay_free(struct wapbl_replay *wr)
3085 1.2 simonb {
3086 1.2 simonb
3087 1.2 simonb KDASSERT(!wapbl_replay_isopen(wr));
3088 1.2 simonb
3089 1.114 riastrad if (wr->wr_inodes) {
3090 1.18 yamt wapbl_free(wr->wr_inodes,
3091 1.18 yamt wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
3092 1.114 riastrad }
3093 1.18 yamt wapbl_free(wr, sizeof(*wr));
3094 1.2 simonb }
3095 1.2 simonb
3096 1.4 joerg #ifdef _KERNEL
3097 1.2 simonb int
3098 1.2 simonb wapbl_replay_isopen1(struct wapbl_replay *wr)
3099 1.2 simonb {
3100 1.2 simonb
3101 1.2 simonb return wapbl_replay_isopen(wr);
3102 1.2 simonb }
3103 1.4 joerg #endif
3104 1.2 simonb
3105 1.62 mlelstv /*
3106 1.62 mlelstv  * calculate the disk address for the i'th block in the wc_blocklist
3107 1.62 mlelstv * offset by j blocks of size blen.
3108 1.62 mlelstv *
3109 1.62 mlelstv * wc_daddr is always a kernel disk address in DEV_BSIZE units that
3110 1.62 mlelstv * was written to the journal.
3111 1.62 mlelstv *
3112 1.62 mlelstv * The kernel needs that address plus the offset in DEV_BSIZE units.
3113 1.62 mlelstv *
3114 1.62 mlelstv * Userland needs that address plus the offset in blen units.
3115 1.62 mlelstv *
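 * For example, assuming the usual DEV_BSIZE of 512 and a blen of 4096,
 * btodb(j * blen) is 8*j, so the kernel form yields wc_daddr + 8*j,
 * while the userland form yields wc_daddr/8 + j.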
3116 1.62 mlelstv */
3117 1.62 mlelstv static daddr_t
3118 1.62 mlelstv wapbl_block_daddr(struct wapbl_wc_blocklist *wc, int i, int j, int blen)
3119 1.62 mlelstv {
3120 1.62 mlelstv daddr_t pbn;
3121 1.62 mlelstv
3122 1.62 mlelstv #ifdef _KERNEL
3123 1.62 mlelstv pbn = wc->wc_blocks[i].wc_daddr + btodb(j * blen);
3124 1.62 mlelstv #else
3125 1.62 mlelstv pbn = dbtob(wc->wc_blocks[i].wc_daddr) / blen + j;
3126 1.62 mlelstv #endif
3127 1.62 mlelstv
3128 1.62 mlelstv return pbn;
3129 1.62 mlelstv }
3130 1.62 mlelstv
3131 1.10 joerg static void
3132 1.10 joerg wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
3133 1.10 joerg {
3134 1.10 joerg struct wapbl_wc_blocklist *wc =
3135 1.10 joerg (struct wapbl_wc_blocklist *)wr->wr_scratch;
3136 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3137 1.10 joerg int i, j, n;
3138 1.10 joerg
3139 1.10 joerg for (i = 0; i < wc->wc_blkcount; i++) {
3140 1.10 joerg /*
3141 1.10 joerg * Enter each physical block into the hashtable independently.
3142 1.10 joerg */
3143 1.14 joerg n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
3144 1.10 joerg for (j = 0; j < n; j++) {
3145 1.114 riastrad wapbl_blkhash_ins(wr,
3146 1.114 riastrad wapbl_block_daddr(wc, i, j, fsblklen),
3147 1.10 joerg *offp);
3148 1.10 joerg wapbl_circ_advance(wr, fsblklen, offp);
3149 1.10 joerg }
3150 1.10 joerg }
3151 1.10 joerg }
3152 1.10 joerg
3153 1.10 joerg static void
3154 1.10 joerg wapbl_replay_process_revocations(struct wapbl_replay *wr)
3155 1.10 joerg {
3156 1.10 joerg struct wapbl_wc_blocklist *wc =
3157 1.10 joerg (struct wapbl_wc_blocklist *)wr->wr_scratch;
3158 1.34 mlelstv int fsblklen = 1 << wr->wr_fs_dev_bshift;
3159 1.10 joerg int i, j, n;
3160 1.10 joerg
3161 1.10 joerg for (i = 0; i < wc->wc_blkcount; i++) {
3162 1.10 joerg /*
3163 1.10 joerg * Remove any blocks found from the hashtable.
3164 1.10 joerg */
3165 1.14 joerg n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
3166 1.114 riastrad for (j = 0; j < n; j++) {
3167 1.114 riastrad wapbl_blkhash_rem(wr, wapbl_block_daddr(wc, i, j,
3168 1.114 riastrad fsblklen));
3169 1.114 riastrad }
3170 1.10 joerg }
3171 1.10 joerg }
3172 1.10 joerg
3173 1.10 joerg static void
3174 1.114 riastrad wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff,
3175 1.114 riastrad off_t newoff)
3176 1.10 joerg {
3177 1.10 joerg struct wapbl_wc_inodelist *wc =
3178 1.10 joerg (struct wapbl_wc_inodelist *)wr->wr_scratch;
3179 1.18 yamt void *new_inodes;
3180 1.18 yamt const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
3181 1.18 yamt
3182 1.18 yamt KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
3183 1.18 yamt
3184 1.10 joerg /*
3185 1.10 joerg 	 * Keep track of where we found this so the location won't be
3186 1.10 joerg * overwritten.
3187 1.10 joerg */
3188 1.10 joerg if (wc->wc_clear) {
3189 1.10 joerg wr->wr_inodestail = oldoff;
3190 1.10 joerg wr->wr_inodescnt = 0;
3191 1.12 joerg if (wr->wr_inodes != NULL) {
3192 1.18 yamt wapbl_free(wr->wr_inodes, oldsize);
3193 1.12 joerg wr->wr_inodes = NULL;
3194 1.12 joerg }
3195 1.10 joerg }
3196 1.10 joerg wr->wr_inodeshead = newoff;
3197 1.10 joerg if (wc->wc_inocnt == 0)
3198 1.10 joerg return;
3199 1.10 joerg
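	/*
	 * Grow the saved inode array and append the records carried by
	 * this journal entry.
	 */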
3200 1.51 para new_inodes = wapbl_alloc((wr->wr_inodescnt + wc->wc_inocnt) *
3201 1.18 yamt sizeof(wr->wr_inodes[0]));
3202 1.18 yamt if (wr->wr_inodes != NULL) {
3203 1.18 yamt memcpy(new_inodes, wr->wr_inodes, oldsize);
3204 1.18 yamt wapbl_free(wr->wr_inodes, oldsize);
3205 1.18 yamt }
3206 1.18 yamt wr->wr_inodes = new_inodes;
3207 1.10 joerg memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
3208 1.18 yamt wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
3209 1.10 joerg wr->wr_inodescnt += wc->wc_inocnt;
3210 1.10 joerg }
3211 1.10 joerg
3212 1.2 simonb static int
3213 1.14 joerg wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
3214 1.2 simonb {
3215 1.2 simonb off_t off;
3216 1.2 simonb int error;
3217 1.2 simonb
3218 1.14 joerg int logblklen = 1 << wr->wr_log_dev_bshift;
3219 1.2 simonb
3220 1.2 simonb wapbl_blkhash_clear(wr);
3221 1.2 simonb
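	/*
	 * Scan the log from tail to head, dispatching on each record's
	 * type.  Every record must advance off by exactly wc_len
	 * bytes; anything else means the log is corrupt.
	 */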
3222 1.14 joerg off = tail;
3223 1.14 joerg while (off != head) {
3224 1.2 simonb struct wapbl_wc_null *wcn;
3225 1.2 simonb off_t saveoff = off;
3226 1.2 simonb error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3227 1.2 simonb if (error)
3228 1.2 simonb goto errout;
3229 1.2 simonb wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3230 1.2 simonb switch (wcn->wc_type) {
3231 1.2 simonb case WAPBL_WC_BLOCKS:
3232 1.10 joerg wapbl_replay_process_blocks(wr, &off);
3233 1.2 simonb break;
3234 1.2 simonb
3235 1.2 simonb case WAPBL_WC_REVOCATIONS:
3236 1.10 joerg wapbl_replay_process_revocations(wr);
3237 1.2 simonb break;
3238 1.2 simonb
3239 1.2 simonb case WAPBL_WC_INODES:
3240 1.10 joerg wapbl_replay_process_inodes(wr, saveoff, off);
3241 1.2 simonb break;
3242 1.10 joerg
3243 1.2 simonb default:
3244 1.2 simonb printf("Unrecognized wapbl type: 0x%08x\n",
3245 1.114 riastrad wcn->wc_type);
3246 1.116 riastrad error = SET_ERROR(EFTYPE);
3247 1.2 simonb goto errout;
3248 1.2 simonb }
3249 1.2 simonb wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3250 1.2 simonb if (off != saveoff) {
3251 1.2 simonb printf("wapbl_replay: corrupted records\n");
3252 1.116 riastrad error = SET_ERROR(EFTYPE);
3253 1.2 simonb goto errout;
3254 1.2 simonb }
3255 1.2 simonb }
3256 1.2 simonb return 0;
3257 1.2 simonb
3258 1.114 riastrad errout:
3259 1.2 simonb wapbl_blkhash_clear(wr);
3260 1.2 simonb return error;
3261 1.2 simonb }
3262 1.2 simonb
3263 1.13 joerg #if 0
3264 1.2 simonb int
3265 1.2 simonb wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
3266 1.2 simonb {
3267 1.2 simonb off_t off;
3268 1.2 simonb int mismatchcnt = 0;
3269 1.14 joerg int logblklen = 1 << wr->wr_log_dev_bshift;
3270 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3271 1.51 para void *scratch1 = wapbl_alloc(MAXBSIZE);
3272 1.51 para void *scratch2 = wapbl_alloc(MAXBSIZE);
3273 1.2 simonb int error = 0;
3274 1.2 simonb
3275 1.2 simonb KDASSERT(wapbl_replay_isopen(wr));
3276 1.2 simonb
3277 1.2 simonb off = wch->wc_tail;
3278 1.2 simonb while (off != wch->wc_head) {
3279 1.2 simonb struct wapbl_wc_null *wcn;
3280 1.2 simonb #ifdef DEBUG
3281 1.2 simonb off_t saveoff = off;
3282 1.2 simonb #endif
3283 1.2 simonb error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3284 1.2 simonb if (error)
3285 1.2 simonb goto out;
3286 1.2 simonb wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3287 1.2 simonb switch (wcn->wc_type) {
3288 1.114 riastrad case WAPBL_WC_BLOCKS: {
3289 1.114 riastrad struct wapbl_wc_blocklist *wc =
3290 1.114 riastrad (struct wapbl_wc_blocklist *)wr->wr_scratch;
3291 1.114 riastrad int i;
3292 1.114 riastrad for (i = 0; i < wc->wc_blkcount; i++) {
3293 1.114 riastrad int foundcnt = 0;
3294 1.114 riastrad int dirtycnt = 0;
3295 1.114 riastrad int j, n;
3296 1.114 riastrad /*
3297 1.114 riastrad 			 * Check each physical block against the
3298 1.114 riastrad * hashtable independently
3299 1.114 riastrad */
3300 1.114 riastrad n = wc->wc_blocks[i].wc_dlen >>
3301 1.114 riastrad wch->wc_fs_dev_bshift;
3302 1.114 riastrad for (j = 0; j < n; j++) {
3303 1.114 riastrad struct wapbl_blk *wb =
3304 1.114 riastrad wapbl_blkhash_get(wr,
3305 1.114 riastrad wapbl_block_daddr(wc, i, j,
3306 1.114 riastrad fsblklen));
3307 1.114 riastrad if (wb && wb->wb_off == off) {
3308 1.114 riastrad foundcnt++;
3309 1.114 riastrad error =
3310 1.114 riastrad wapbl_circ_read(wr,
3311 1.114 riastrad scratch1, fsblklen,
3312 1.114 riastrad &off);
3313 1.114 riastrad if (error)
3314 1.114 riastrad goto out;
3315 1.114 riastrad error =
3316 1.114 riastrad wapbl_read(scratch2,
3317 1.114 riastrad fsblklen, fsdevvp,
3318 1.114 riastrad wb->wb_blk);
3319 1.114 riastrad if (error)
3320 1.114 riastrad goto out;
3321 1.114 riastrad if (memcmp(scratch1,
3322 1.114 riastrad scratch2,
3323 1.114 riastrad fsblklen)) {
3324 1.114 riastrad printf("wapbl_verify:"
3325 1.114 riastrad " mismatch block"
3326 1.114 riastrad " %"PRId64
3327 1.114 riastrad " at off"
3328 1.114 riastrad " %"PRIdMAX"\n",
3329 1.114 riastrad wb->wb_blk,
3330 1.114 riastrad (intmax_t)off);
3331 1.114 riastrad dirtycnt++;
3332 1.114 riastrad mismatchcnt++;
3333 1.114 riastrad }
3334 1.114 riastrad } else {
3335 1.114 riastrad wapbl_circ_advance(wr,
3336 1.114 riastrad fsblklen, &off);
3337 1.114 riastrad }
3338 1.114 riastrad }
3339 1.114 riastrad #if 0
3340 1.114 riastrad /*
3341 1.114 riastrad * If all of the blocks in an entry
3342 1.114 riastrad * are clean, then remove all of its
3343 1.114 riastrad * blocks from the hashtable since they
3344 1.114 riastrad * never will need replay.
3345 1.114 riastrad */
3346 1.114 riastrad if (foundcnt != 0 && dirtycnt == 0) {
3347 1.114 riastrad off = saveoff;
3348 1.114 riastrad wapbl_circ_advance(wr, logblklen,
3349 1.114 riastrad &off);
3350 1.2 simonb for (j = 0; j < n; j++) {
3351 1.2 simonb struct wapbl_blk *wb =
3352 1.114 riastrad wapbl_blkhash_get(wr,
3353 1.114 riastrad wapbl_block_daddr(wc,
3354 1.114 riastrad i, j, fsblklen));
3355 1.114 riastrad if (wb &&
3356 1.114 riastrad (wb->wb_off == off)) {
3357 1.114 riastrad wapbl_blkhash_rem(wr,
3358 1.2 simonb wb->wb_blk);
3359 1.2 simonb }
3360 1.2 simonb wapbl_circ_advance(wr,
3361 1.114 riastrad fsblklen, &off);
3362 1.2 simonb }
3363 1.114 riastrad }
3364 1.2 simonb #endif
3365 1.2 simonb }
3366 1.114 riastrad }
3367 1.2 simonb break;
3368 1.2 simonb case WAPBL_WC_REVOCATIONS:
3369 1.2 simonb case WAPBL_WC_INODES:
3370 1.2 simonb break;
3371 1.2 simonb default:
3372 1.2 simonb KASSERT(0);
3373 1.2 simonb }
3374 1.2 simonb #ifdef DEBUG
3375 1.2 simonb wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3376 1.2 simonb KASSERT(off == saveoff);
3377 1.2 simonb #endif
3378 1.2 simonb }
3379 1.114 riastrad out:
3380 1.18 yamt wapbl_free(scratch1, MAXBSIZE);
3381 1.18 yamt wapbl_free(scratch2, MAXBSIZE);
3382 1.2 simonb if (!error && mismatchcnt)
3383 1.116 riastrad error = SET_ERROR(EFTYPE);
3384 1.2 simonb return error;
3385 1.2 simonb }
3386 1.2 simonb #endif
3387 1.2 simonb
3388 1.2 simonb int
3389 1.2 simonb wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
3390 1.2 simonb {
3391 1.9 joerg struct wapbl_blk *wb;
3392 1.9 joerg size_t i;
3393 1.2 simonb off_t off;
3394 1.9 joerg void *scratch;
3395 1.2 simonb int error = 0;
3396 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3397 1.2 simonb
3398 1.2 simonb KDASSERT(wapbl_replay_isopen(wr));
3399 1.2 simonb
3400 1.51 para scratch = wapbl_alloc(MAXBSIZE);
3401 1.2 simonb
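	/*
	 * For each block recorded in the hash, read its newest copy
	 * out of the log and write it to its home location on the
	 * filesystem device.
	 */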
3402 1.37 drochner for (i = 0; i <= wr->wr_blkhashmask; ++i) {
3403 1.9 joerg LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
3404 1.9 joerg off = wb->wb_off;
3405 1.9 joerg error = wapbl_circ_read(wr, scratch, fsblklen, &off);
3406 1.9 joerg if (error)
3407 1.9 joerg break;
3408 1.9 joerg error = wapbl_write(scratch, fsblklen, fsdevvp,
3409 1.9 joerg wb->wb_blk);
3410 1.9 joerg if (error)
3411 1.9 joerg break;
3412 1.2 simonb }
3413 1.2 simonb }
3414 1.9 joerg
3415 1.18 yamt wapbl_free(scratch, MAXBSIZE);
3416 1.2 simonb return error;
3417 1.2 simonb }
3418 1.2 simonb
3419 1.2 simonb int
3420 1.6 joerg wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
3421 1.6 joerg {
3422 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3423 1.6 joerg
3424 1.6 joerg KDASSERT(wapbl_replay_isopen(wr));
3425 1.6 joerg KASSERT((len % fsblklen) == 0);
3426 1.6 joerg
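	/* Return nonzero if any block in the range has a copy in the log. */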
3427 1.6 joerg while (len != 0) {
3428 1.6 joerg struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3429 1.6 joerg if (wb)
3430 1.6 joerg return 1;
3431 1.6 joerg 		len -= fsblklen;
		blk++;
3432 1.6 joerg }
3433 1.6 joerg return 0;
3434 1.6 joerg }
3435 1.6 joerg
3436 1.6 joerg int
3437 1.2 simonb wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
3438 1.2 simonb {
3439 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3440 1.2 simonb
3441 1.2 simonb KDASSERT(wapbl_replay_isopen(wr));
3442 1.2 simonb
3443 1.2 simonb KASSERT((len % fsblklen) == 0);
3444 1.2 simonb
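	/*
	 * Blocks present in the log overwrite the caller's buffer with
	 * the logged copy; blocks not in the log are left untouched.
	 */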
3445 1.2 simonb while (len != 0) {
3446 1.2 simonb struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3447 1.2 simonb if (wb) {
3448 1.2 simonb off_t off = wb->wb_off;
3449 1.2 simonb int error;
3450 1.2 simonb error = wapbl_circ_read(wr, data, fsblklen, &off);
3451 1.2 simonb if (error)
3452 1.2 simonb return error;
3453 1.2 simonb }
3454 1.2 simonb data = (uint8_t *)data + fsblklen;
3455 1.2 simonb len -= fsblklen;
3456 1.2 simonb blk++;
3457 1.2 simonb }
3458 1.2 simonb return 0;
3459 1.2 simonb }
3460 1.35 pooka
3461 1.36 pooka #ifdef _KERNEL
3462 1.64 pgoyette
3463 1.35 pooka MODULE(MODULE_CLASS_VFS, wapbl, NULL);
3464 1.35 pooka
3465 1.35 pooka static int
3466 1.35 pooka wapbl_modcmd(modcmd_t cmd, void *arg)
3467 1.35 pooka {
3468 1.35 pooka
3469 1.35 pooka switch (cmd) {
3470 1.35 pooka case MODULE_CMD_INIT:
3471 1.39 christos wapbl_init();
3472 1.35 pooka return 0;
3473 1.35 pooka case MODULE_CMD_FINI:
3474 1.74 riastrad return wapbl_fini();
3475 1.35 pooka default:
3476 1.116 riastrad return SET_ERROR(ENOTTY);
3477 1.35 pooka }
3478 1.35 pooka }
3479 1.114 riastrad
3480 1.36 pooka #endif /* _KERNEL */
3481