/*	$NetBSD: vfs_wapbl.c,v 1.116 2024/12/07 02:27:38 riastradh Exp $	*/

/*-
 * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This implements file system independent write ahead filesystem logging.
 */

#define WAPBL_INTERNAL

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.116 2024/12/07 02:27:38 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/bitops.h>
#include <sys/time.h>
#include <sys/wapbl.h>
#include <sys/wapbl_replay.h>

#ifdef _KERNEL

#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/evcnt.h>
#include <sys/file.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <miscfs/specfs/specdev.h>

#define	wapbl_alloc(s)		kmem_alloc((s), KM_SLEEP)
#define	wapbl_free(a, s)	kmem_free((a), (s))
#define	wapbl_calloc(n, s)	kmem_zalloc((n)*(s), KM_SLEEP)

static int wapbl_flush_disk_cache = 1;
static int wapbl_verbose_commit = 0;
static int wapbl_allow_dpofua = 0;	/* switched off by default for now */
static int wapbl_journal_iobufs = 4;

static inline size_t wapbl_space_free(size_t, off_t, off_t);

#else /* !_KERNEL */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	KDASSERT(x)		assert(x)
#define	KASSERT(x)		assert(x)
#define	wapbl_alloc(s)		malloc(s)
#define	wapbl_free(a, s)	free(a)
#define	wapbl_calloc(n, s)	calloc((n), (s))

#endif /* !_KERNEL */

/*
 * INTERNAL DATA STRUCTURES
 */

/*
 * This structure holds per-mount log information.
 *
 * Legend:	a = atomic access only
 *		r = read-only after init
 *		l = rwlock held
 *		m = mutex held
 *		lm = rwlock held writing or mutex held
 *		u = unlocked access ok
 *		b = bufcache_lock held
 */
LIST_HEAD(wapbl_ino_head, wapbl_ino);
struct wapbl {
	struct vnode *wl_logvp;	/* r: log here */
	struct vnode *wl_devvp;	/* r: log on this device */
	struct mount *wl_mount;	/* r: mountpoint wl is associated with */
	daddr_t wl_logpbn;	/* r: Physical block number of start of log */
	int wl_log_dev_bshift;	/* r: logarithm of device block size of log
				   device */
	int wl_fs_dev_bshift;	/* r: logarithm of device block size of
				   filesystem device */

	unsigned wl_lock_count;	/* m: Count of transactions in progress */

	size_t wl_circ_size;	/* r: Number of bytes in buffer of log */
	size_t wl_circ_off;	/* r: Number of bytes reserved at start */

	size_t wl_bufcount_max;	/* r: Number of buffers reserved for log */
	size_t wl_bufbytes_max;	/* r: Number of buf bytes reserved for log */

	off_t wl_head;		/* l: Byte offset of log head */
	off_t wl_tail;		/* l: Byte offset of log tail */
	/*
	 * WAPBL log layout, stored on wl_devvp at wl_logpbn:
	 *
	 *  ___________________ wl_circ_size __________________
	 * /                                                    \
	 * +---------+---------+-------+--------------+--------+
	 * [ commit0 | commit1 | CCWCW | EEEEEEEEEEEE | CCCWCW ]
	 * +---------+---------+-------+--------------+--------+
	 *       wl_circ_off --^       ^-- wl_head    ^-- wl_tail
	 *
	 * commit0 and commit1 are commit headers.  A commit header has
	 * a generation number, indicating which of the two headers is
	 * more recent, and an assignment of head and tail pointers.
	 * The rest is a circular queue of log records, starting at
	 * the byte offset wl_circ_off.
	 *
	 * E marks empty space for records.
	 * W marks records for block writes issued but waiting.
	 * C marks completed records.
	 *
	 * wapbl_flush writes new records to empty `E' spaces after
	 * wl_head from the current transaction in memory.
	 *
	 * wapbl_truncate advances wl_tail past any completed `C'
	 * records, freeing them up for use.
	 *
	 * head == tail == 0 means log is empty.
	 * head == tail != 0 means log is full.
	 *
	 * See assertions in wapbl_advance() for other boundary
	 * conditions.
	 *
	 * Only wapbl_flush moves the head, except when wapbl_truncate
	 * sets it to 0 to indicate that the log is empty.
	 *
	 * Only wapbl_truncate moves the tail, except when wapbl_flush
	 * sets it to wl_circ_off to indicate that the log is full.
	 */

	struct wapbl_wc_header *wl_wc_header;	/* l */
	void *wl_wc_scratch;	/* l: scratch space (XXX: why?!?) */

	kmutex_t wl_mtx;	/* u: short-term lock */
	krwlock_t wl_rwlock;	/* u: File system transaction lock */

	/*
	 * Must be held while accessing
	 * wl_count or wl_bufs or head or tail
	 */

#if _KERNEL
	/*
	 * Callback called from within the flush routine to flush any extra
	 * bits.  Note that flush may be skipped without calling this if
	 * there are no outstanding buffers in the transaction.
	 */
	wapbl_flush_fn_t wl_flush;	/* r */
	wapbl_flush_fn_t wl_flush_abort;/* r */

	/* Event counters */
	char wl_ev_group[EVCNT_STRING_MAX];	/* r */
	struct evcnt wl_ev_commit;		/* l */
	struct evcnt wl_ev_journalwrite;	/* l */
	struct evcnt wl_ev_jbufs_bio_nowait;	/* l */
	struct evcnt wl_ev_metawrite;		/* lm */
	struct evcnt wl_ev_cacheflush;		/* l */
#endif

	size_t wl_bufbytes;	/* m: Byte count of pages in wl_bufs */
	size_t wl_bufcount;	/* m: Count of buffers in wl_bufs */
	size_t wl_bcount;	/* m: Total bcount of wl_bufs */

	TAILQ_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */

	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
	size_t wl_reclaimable_bytes;	/* m: Amount of space available for
					   reclamation by truncate */
	int wl_error_count;	/* m: # of wl_entries with errors */
	size_t wl_reserved_bytes; /* never truncate log smaller than this */

#ifdef WAPBL_DEBUG_BUFBYTES
	size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
#endif

#if _KERNEL
	int wl_brperjblock;	/* r: Block records per journal block */
#endif

	TAILQ_HEAD(, wapbl_dealloc) wl_dealloclist;	/* lm: list head */
	int wl_dealloccnt;				/* lm: total count */
	int wl_dealloclim;				/* r: max count */

	/* hashtable of inode numbers for allocated but unlinked inodes */
	/* synch ??? */
	struct wapbl_ino_head *wl_inohash;
	u_long wl_inohashmask;
	int wl_inohashcnt;

	SIMPLEQ_HEAD(, wapbl_entry) wl_entries;	/* m: On disk transaction
						   accounting */

	/* buffers for wapbl_buffered_write() */
	TAILQ_HEAD(, buf) wl_iobufs;		/* l: Free or filling bufs */
	TAILQ_HEAD(, buf) wl_iobufs_busy;	/* l: In-transit bufs */

	int wl_dkcache;		/* r: disk cache flags */
#define	WAPBL_USE_FUA(wl)	\
	(wapbl_allow_dpofua && ISSET((wl)->wl_dkcache, DKCACHE_FUA))
#define	WAPBL_JFLAGS(wl)	\
	(WAPBL_USE_FUA(wl) ? (wl)->wl_jwrite_flags : 0)
#define	WAPBL_JDATA_FLAGS(wl)	\
	(WAPBL_JFLAGS(wl) & B_MEDIA_DPO)	/* only DPO */
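	/*
	 * Example (follows from wapbl_dkcache_init() below): if the
	 * device reports both DKCACHE_FUA and DKCACHE_DPO and the
	 * wapbl_allow_dpofua sysctl is enabled, commit writes see
	 * WAPBL_JFLAGS = B_MEDIA_FUA|B_MEDIA_DPO while journal data
	 * writes see only WAPBL_JDATA_FLAGS = B_MEDIA_DPO.
	 */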
	int wl_jwrite_flags;	/* r: journal write flags */
};

#ifdef WAPBL_DEBUG_PRINT
int wapbl_debug_print = WAPBL_DEBUG_PRINT;
#endif

/****************************************************************/
#ifdef _KERNEL

#ifdef WAPBL_DEBUG
struct wapbl *wapbl_debug_wl;
#endif

static int wapbl_write_commit(struct wapbl *, off_t, off_t);
static int wapbl_write_blocks(struct wapbl *, off_t *);
static int wapbl_write_revocations(struct wapbl *, off_t *);
static int wapbl_write_inodes(struct wapbl *, off_t *);
#endif /* _KERNEL */

static int wapbl_replay_process(struct wapbl_replay *, off_t, off_t);

static inline size_t wapbl_space_used(size_t, off_t, off_t);

#ifdef _KERNEL

static struct pool wapbl_entry_pool;
static struct pool wapbl_dealloc_pool;

#define	WAPBL_INODETRK_SIZE 83
static int wapbl_ino_pool_refcount;
static struct pool wapbl_ino_pool;
struct wapbl_ino {
	LIST_ENTRY(wapbl_ino) wi_hash;
	ino_t wi_ino;
	mode_t wi_mode;
};

static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
static void wapbl_inodetrk_free(struct wapbl *wl);
static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);

static size_t wapbl_transaction_len(struct wapbl *wl);
static inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);

static void wapbl_deallocation_free(struct wapbl *, struct wapbl_dealloc *,
    bool);

static void wapbl_evcnt_init(struct wapbl *);
static void wapbl_evcnt_free(struct wapbl *);

static void wapbl_dkcache_init(struct wapbl *);

#if 0
int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
#endif

static int wapbl_replay_isopen1(struct wapbl_replay *);

const struct wapbl_ops wapbl_ops = {
	.wo_wapbl_discard	= wapbl_discard,
	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
	.wo_wapbl_replay_can_read = wapbl_replay_can_read,
	.wo_wapbl_replay_read	= wapbl_replay_read,
	.wo_wapbl_add_buf	= wapbl_add_buf,
	.wo_wapbl_remove_buf	= wapbl_remove_buf,
	.wo_wapbl_resize_buf	= wapbl_resize_buf,
	.wo_wapbl_begin		= wapbl_begin,
	.wo_wapbl_end		= wapbl_end,
	.wo_wapbl_junlock_assert= wapbl_junlock_assert,
	.wo_wapbl_jlock_assert	= wapbl_jlock_assert,

	/* XXX: the following is only used to say "this is a wapbl buf" */
	.wo_wapbl_biodone	= wapbl_biodone,
};

SYSCTL_SETUP(wapbl_sysctl_init, "wapbl sysctl")
{
	int rv;
	const struct sysctlnode *rnode, *cnode;

	rv = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "wapbl",
	    SYSCTL_DESCR("WAPBL journaling options"),
	    NULL, 0, NULL, 0,
	    CTL_VFS, CTL_CREATE, CTL_EOL);
	if (rv)
		return;

	rv = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "flush_disk_cache",
	    SYSCTL_DESCR("flush disk cache"),
	    NULL, 0, &wapbl_flush_disk_cache, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return;

	rv = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "verbose_commit",
	    SYSCTL_DESCR("show time and size of wapbl log commits"),
	    NULL, 0, &wapbl_verbose_commit, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return;

	rv = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "allow_dpofua",
	    SYSCTL_DESCR("allow use of FUA/DPO instead of cache flush"
		" if available"),
	    NULL, 0, &wapbl_allow_dpofua, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return;

	rv = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "journal_iobufs",
	    SYSCTL_DESCR("count of bufs used for journal I/O"
		" (max async count)"),
	    NULL, 0, &wapbl_journal_iobufs, 0,
	    CTL_CREATE, CTL_EOL);
	if (rv)
		return;

	return;
}

static void
wapbl_init(void)
{

	pool_init(&wapbl_entry_pool, sizeof(struct wapbl_entry), 0, 0, 0,
	    "wapblentrypl", &pool_allocator_kmem, IPL_VM);
	pool_init(&wapbl_dealloc_pool, sizeof(struct wapbl_dealloc), 0, 0, 0,
	    "wapbldealloc", &pool_allocator_nointr, IPL_NONE);
}

static int
wapbl_fini(void)
{

	pool_destroy(&wapbl_dealloc_pool);
	pool_destroy(&wapbl_entry_pool);

	return 0;
}

static void
wapbl_evcnt_init(struct wapbl *wl)
{

	snprintf(wl->wl_ev_group, sizeof(wl->wl_ev_group),
	    "wapbl fsid 0x%x/0x%x",
	    wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[0],
	    wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[1]);

	evcnt_attach_dynamic(&wl->wl_ev_commit, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "commit");
	evcnt_attach_dynamic(&wl->wl_ev_journalwrite, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "journal write total");
	evcnt_attach_dynamic(&wl->wl_ev_jbufs_bio_nowait, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "journal write finished async");
	evcnt_attach_dynamic(&wl->wl_ev_metawrite, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "metadata async write");
	evcnt_attach_dynamic(&wl->wl_ev_cacheflush, EVCNT_TYPE_MISC,
	    NULL, wl->wl_ev_group, "cache flush");
}

static void
wapbl_evcnt_free(struct wapbl *wl)
{

	evcnt_detach(&wl->wl_ev_commit);
	evcnt_detach(&wl->wl_ev_journalwrite);
	evcnt_detach(&wl->wl_ev_jbufs_bio_nowait);
	evcnt_detach(&wl->wl_ev_metawrite);
	evcnt_detach(&wl->wl_ev_cacheflush);
}

static void
wapbl_dkcache_init(struct wapbl *wl)
{
	int error;

	/* Get disk cache flags */
	error = VOP_IOCTL(wl->wl_devvp, DIOCGCACHE, &wl->wl_dkcache,
	    FWRITE, FSCRED);
	if (error) {
		/* behave as if there was a write cache */
		wl->wl_dkcache = DKCACHE_WRITE;
	}

	/* Use FUA instead of cache flush if available */
	if (ISSET(wl->wl_dkcache, DKCACHE_FUA))
		wl->wl_jwrite_flags |= B_MEDIA_FUA;

	/* Use DPO for journal writes if available */
	if (ISSET(wl->wl_dkcache, DKCACHE_DPO))
		wl->wl_jwrite_flags |= B_MEDIA_DPO;
}

static int
wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
{
	int error, i;

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));

	/*
	 * It's only valid to reuse the replay log if it's
	 * the same as the new log we just opened.
	 */
	KDASSERT(!wapbl_replay_isopen(wr));
	KASSERT(wl->wl_devvp->v_type == VBLK);
	KASSERT(wr->wr_devvp->v_type == VBLK);
	KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
	KASSERT(wl->wl_logpbn == wr->wr_logpbn);
	KASSERT(wl->wl_circ_size == wr->wr_circ_size);
	KASSERT(wl->wl_circ_off == wr->wr_circ_off);
	KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
	KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);

	wl->wl_wc_header->wc_generation = wr->wr_generation + 1;

	for (i = 0; i < wr->wr_inodescnt; i++)
		wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
		    wr->wr_inodes[i].wr_imode);

	/* Make sure new transaction won't overwrite old inodes list */
	KDASSERT(wapbl_transaction_len(wl) <=
	    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
		wr->wr_inodestail));

	wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
	wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
	    wapbl_transaction_len(wl);

	error = wapbl_write_inodes(wl, &wl->wl_head);
	if (error)
		return error;

	KASSERT(wl->wl_head != wl->wl_tail);
	KASSERT(wl->wl_head != 0);

	return 0;
}

int
wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
    daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
    wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
{
	struct wapbl *wl;
	struct vnode *devvp;
	daddr_t logpbn;
	int error;
	int log_dev_bshift = ilog2(blksize);
	int fs_dev_bshift = log_dev_bshift;
	int run;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN,
	    ("wapbl_start: vp=%p off=%"PRId64" count=%zu blksize=%zu\n",
	    vp, off, count, blksize));

	if (log_dev_bshift > fs_dev_bshift) {
		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
		    ("wapbl: log device's block size cannot be larger "
		    "than filesystem's\n"));
		/*
		 * Not currently implemented, although it could be if
		 * needed someday.
		 */
		return SET_ERROR(ENOSYS);
	}

	if (off < 0)
		return SET_ERROR(EINVAL);

	if (blksize < DEV_BSIZE)
		return SET_ERROR(EINVAL);
	if (blksize % DEV_BSIZE)
		return SET_ERROR(EINVAL);

	/* XXXTODO: verify that the full load is writable */

	/*
	 * XXX check for minimum log size
	 * minimum is governed by minimum amount of space
	 * to complete a transaction. (probably truncate)
	 */
	/* XXX for now pick something minimal */
	if ((count * blksize) < MAXPHYS) {
		return SET_ERROR(ENOSPC);
	}

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
		return error;
	}

	wl = wapbl_calloc(1, sizeof(*wl));
	rw_init(&wl->wl_rwlock);
	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
	TAILQ_INIT(&wl->wl_bufs);
	SIMPLEQ_INIT(&wl->wl_entries);

	wl->wl_logvp = vp;
	wl->wl_devvp = devvp;
	wl->wl_mount = mp;
	wl->wl_logpbn = logpbn;
	wl->wl_log_dev_bshift = log_dev_bshift;
	wl->wl_fs_dev_bshift = fs_dev_bshift;

	wl->wl_flush = flushfn;
	wl->wl_flush_abort = flushabortfn;

	/* Reserve two log device blocks for the commit headers */
	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
	/* truncate the log usage to a multiple of log_dev_bshift */
	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
	wl->wl_circ_size <<= wl->wl_log_dev_bshift;

	/*
	 * wl_bufbytes_max limits the size of the in memory transaction space.
	 * - Since buffers are allocated and accounted for in units of
	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
	 *   (i.e. 1<<PAGE_SHIFT)
	 * - Since the log device has to be written in units of
	 *   1<<wl_log_dev_bshift it is required to be a multiple of
	 *   1<<wl_log_dev_bshift.
	 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift,
	 *   it is convenient to be a multiple of 1<<wl_fs_dev_bshift.
	 * Therefore it must be multiple of the least common multiple of those
	 * three quantities.  Fortunately, all of those quantities are
	 * guaranteed to be a power of two, and the least common multiple of
	 * a set of numbers which are all powers of two is simply the maximum
	 * of those numbers.  Finally, the maximum logarithm of a power of two
	 * is the same as the log of the maximum power of two.  So we can do
	 * the following operations to size wl_bufbytes_max:
	 */

	/* XXX fix actual number of pages reserved per filesystem. */
	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);

	/* Round wl_bufbytes_max to the largest power of two constraint */
	wl->wl_bufbytes_max >>= PAGE_SHIFT;
	wl->wl_bufbytes_max <<= PAGE_SHIFT;
	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
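	/*
	 * Illustrative example (assumed values, not derived from this
	 * file): with PAGE_SHIFT = 12, wl_log_dev_bshift = 9 and
	 * wl_fs_dev_bshift = 11, a starting value of 1000000 is first
	 * rounded down to 999424 (a multiple of 4096); the later shift
	 * pairs are then no-ops, since a multiple of 4096 is already a
	 * multiple of 512 and of 2048.  The result is a multiple of the
	 * largest of the three alignments, as argued above.
	 */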

	/* XXX maybe use filesystem fragment size instead of 1024 */
	/* XXX fix actual number of buffers reserved per filesystem. */
	wl->wl_bufcount_max = (buf_nbuf() / 2) * 1024;

	wl->wl_brperjblock = ((1<<wl->wl_log_dev_bshift)
	    - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
	KASSERT(wl->wl_brperjblock > 0);

	/* XXX tie this into resource estimation */
	wl->wl_dealloclim = wl->wl_bufbytes_max / mp->mnt_stat.f_bsize / 2;
	TAILQ_INIT(&wl->wl_dealloclist);

	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);

	wapbl_evcnt_init(wl);

	wapbl_dkcache_init(wl);

	/* Initialize the commit header */
	{
		struct wapbl_wc_header *wc;
		size_t len = 1 << wl->wl_log_dev_bshift;
		wc = wapbl_calloc(1, len);
		wc->wc_type = WAPBL_WC_HEADER;
		wc->wc_len = len;
		wc->wc_circ_off = wl->wl_circ_off;
		wc->wc_circ_size = wl->wl_circ_size;
		/* XXX wc->wc_fsid */
		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
		wl->wl_wc_header = wc;
		wl->wl_wc_scratch = wapbl_alloc(len);
	}

	TAILQ_INIT(&wl->wl_iobufs);
	TAILQ_INIT(&wl->wl_iobufs_busy);
	for (int i = 0; i < wapbl_journal_iobufs; i++) {
		struct buf *bp;

		if ((bp = geteblk(MAXPHYS)) == NULL)
			goto errout;

		mutex_enter(&bufcache_lock);
		mutex_enter(devvp->v_interlock);
		bgetvp(devvp, bp);
		mutex_exit(devvp->v_interlock);
		mutex_exit(&bufcache_lock);

		bp->b_dev = devvp->v_rdev;

		TAILQ_INSERT_TAIL(&wl->wl_iobufs, bp, b_wapbllist);
	}

	/*
	 * if there was an existing set of unlinked but
	 * allocated inodes, preserve it in the new
	 * log.
	 */
	if (wr && wr->wr_inodescnt) {
		error = wapbl_start_flush_inodes(wl, wr);
		if (error)
			goto errout;
	}

	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
	if (error) {
		goto errout;
	}

	*wlp = wl;
#if defined(WAPBL_DEBUG)
	wapbl_debug_wl = wl;
#endif

	return 0;
errout:
	wapbl_discard(wl);
	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	while (!TAILQ_EMPTY(&wl->wl_iobufs)) {
		struct buf *bp;

		bp = TAILQ_FIRST(&wl->wl_iobufs);
		TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
		brelse(bp, BC_INVAL);
	}
	wapbl_inodetrk_free(wl);
	wapbl_free(wl, sizeof(*wl));

	return error;
}

/*
 * Like wapbl_flush, only discards the transaction
 * completely
 */

void
wapbl_discard(struct wapbl *wl)
{
	struct wapbl_entry *we;
	struct wapbl_dealloc *wd;
	struct buf *bp;
	int i;

	/*
	 * XXX we may consider using upgrade here
	 * if we want to call flush from inside a transaction
	 */
	rw_enter(&wl->wl_rwlock, RW_WRITER);
	wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));

#ifdef WAPBL_DEBUG_PRINT
	{
		pid_t pid = -1;
		lwpid_t lid = -1;
		if (curproc)
			pid = curproc->p_pid;
		if (curlwp)
			lid = curlwp->l_lid;
#ifdef WAPBL_DEBUG_BUFBYTES
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding "
		    "transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu "
		    "unsynced=%zu\n",
		    pid, lid,
		    wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes,
		    wl->wl_unsynced_bufbytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d, unsynced = %zu\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error, we->we_unsynced_bufbytes));
		}
#else /* !WAPBL_DEBUG_BUFBYTES */
		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
		    ("wapbl_discard: thread %d.%d discarding transaction\n"
		    "\tbufcount=%zu bufbytes=%zu bcount=%zu "
		    "deallocs=%d inodes=%d\n"
		    "\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
		    pid, lid,
		    wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
		    wl->wl_dealloccnt, wl->wl_inohashcnt,
		    wl->wl_error_count, wl->wl_reclaimable_bytes,
		    wl->wl_reserved_bytes));
		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
			    "error = %d\n",
			    we->we_bufcount, we->we_reclaimable_bytes,
			    we->we_error));
		}
#endif /* !WAPBL_DEBUG_BUFBYTES */
	}
#endif /* WAPBL_DEBUG_PRINT */

	for (i = 0; i <= wl->wl_inohashmask; i++) {
		struct wapbl_ino_head *wih;
		struct wapbl_ino *wi;

		wih = &wl->wl_inohash[i];
		while ((wi = LIST_FIRST(wih)) != NULL) {
			LIST_REMOVE(wi, wi_hash);
			pool_put(&wapbl_ino_pool, wi);
			KASSERT(wl->wl_inohashcnt > 0);
			wl->wl_inohashcnt--;
		}
	}

	/*
	 * clean buffer list
	 */
	mutex_enter(&bufcache_lock);
	mutex_enter(&wl->wl_mtx);
	while ((bp = TAILQ_FIRST(&wl->wl_bufs)) != NULL) {
		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
			KASSERT(bp->b_flags & B_LOCKED);
			KASSERT(bp->b_oflags & BO_DELWRI);
			/*
			 * Buffer is already on BQ_LOCKED queue.
			 * The buffer will be unlocked and
			 * removed from the transaction in brelsel()
			 */
			mutex_exit(&wl->wl_mtx);
			bremfree(bp);
			brelsel(bp, BC_INVAL);
			mutex_enter(&wl->wl_mtx);
		}
	}

	/*
	 * Remove references to this wl from wl_entries, free any which
	 * no longer have buffers, others will be freed in wapbl_biodone()
	 * when they no longer have any buffers.
	 */
	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
		/* XXX should we be accumulating wl_error_count
		 * and increasing reclaimable bytes ? */
		we->we_wapbl = NULL;
		if (we->we_bufcount == 0) {
#ifdef WAPBL_DEBUG_BUFBYTES
			KASSERT(we->we_unsynced_bufbytes == 0);
#endif
			pool_put(&wapbl_entry_pool, we);
		}
	}

	mutex_exit(&wl->wl_mtx);
	mutex_exit(&bufcache_lock);

	/* Discard list of deallocs */
	while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL)
		wapbl_deallocation_free(wl, wd, true);

	/* XXX should we clear wl_reserved_bytes? */

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_bufs));
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
	KASSERT(wl->wl_dealloccnt == 0);

	rw_exit(&wl->wl_rwlock);
}

int
wapbl_stop(struct wapbl *wl, int force)
{
	int error;

	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
	error = wapbl_flush(wl, 1);
	if (error) {
		if (force)
			wapbl_discard(wl);
		else
			return error;
	}

	/* Unlinked inodes persist after a flush */
	if (wl->wl_inohashcnt) {
		if (force) {
			wapbl_discard(wl);
		} else {
			return SET_ERROR(EBUSY);
		}
	}

	KASSERT(wl->wl_bufbytes == 0);
	KASSERT(wl->wl_bcount == 0);
	KASSERT(wl->wl_bufcount == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_bufs));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
	KASSERT(wl->wl_inohashcnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
	KASSERT(wl->wl_dealloccnt == 0);
	KASSERT(TAILQ_EMPTY(&wl->wl_iobufs_busy));

	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
	while (!TAILQ_EMPTY(&wl->wl_iobufs)) {
		struct buf *bp;

		bp = TAILQ_FIRST(&wl->wl_iobufs);
		TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
		brelse(bp, BC_INVAL);
	}
	wapbl_inodetrk_free(wl);

	wapbl_evcnt_free(wl);

	cv_destroy(&wl->wl_reclaimable_cv);
	mutex_destroy(&wl->wl_mtx);
	rw_destroy(&wl->wl_rwlock);
	wapbl_free(wl, sizeof(*wl));

	return 0;
}

/****************************************************************/
/*
 * Unbuffered disk I/O
 */

static void
wapbl_doio_accounting(struct vnode *devvp, int flags)
{
	struct pstats *pstats = curlwp->l_proc->p_stats;

	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
		mutex_enter(devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(devvp->v_interlock);
		pstats->p_ru.ru_oublock++;
	} else {
		pstats->p_ru.ru_inblock++;
	}

}

static int
wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
{
	struct buf *bp;
	int error;

	KASSERT(devvp->v_type == VBLK);

	wapbl_doio_accounting(devvp, flags);

	bp = getiobuf(devvp, true);
	bp->b_flags = flags;
	bp->b_cflags |= BC_BUSY;	/* mandatory, asserted by biowait() */
	bp->b_dev = devvp->v_rdev;
	bp->b_data = data;
	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
	bp->b_blkno = pbn;
	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);

	WAPBL_PRINTF(WAPBL_PRINT_IO,
	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%"PRIx64"\n",
	    BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
	    bp->b_blkno, bp->b_dev));

	VOP_STRATEGY(devvp, bp);

	error = biowait(bp);
	putiobuf(bp);

	if (error) {
		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
		    ("wapbl_doio: %s %zu bytes at block %" PRId64
		    " on dev 0x%"PRIx64" failed with error %d\n",
		    (((flags & (B_WRITE | B_READ)) == B_WRITE) ?
		     "write" : "read"),
		    len, pbn, devvp->v_rdev, error));
	}

	return error;
}

/*
 * wapbl_write(data, len, devvp, pbn)
 *
 *	Synchronously write len bytes from data to physical block pbn
 *	on devvp.
 */
int
wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
}

/*
 * wapbl_read(data, len, devvp, pbn)
 *
 *	Synchronously read len bytes into data from physical block pbn
 *	on devvp.
 */
int
wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
{

	return wapbl_doio(data, len, devvp, pbn, B_READ);
}

/****************************************************************/
/*
 * Buffered disk writes -- try to coalesce writes and emit
 * MAXPHYS-aligned blocks.
 */

/*
 * wapbl_buffered_write_async(wl, bp)
 *
 *	Send buffer for asynchronous write.
 */
static void
wapbl_buffered_write_async(struct wapbl *wl, struct buf *bp)
{

	wapbl_doio_accounting(wl->wl_devvp, bp->b_flags);

	KASSERT(TAILQ_FIRST(&wl->wl_iobufs) == bp);
	TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);

	bp->b_flags |= B_WRITE;
	bp->b_cflags |= BC_BUSY;	/* mandatory, asserted by biowait() */
	bp->b_oflags = 0;
	bp->b_bcount = bp->b_resid;
	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);

	VOP_STRATEGY(wl->wl_devvp, bp);

	wl->wl_ev_journalwrite.ev_count++;

	TAILQ_INSERT_TAIL(&wl->wl_iobufs_busy, bp, b_wapbllist);
}

1017 1.95 jdolecek /*
1018 1.71 riastrad * wapbl_buffered_flush(wl)
1019 1.71 riastrad *
1020 1.71 riastrad * Flush any buffered writes from wapbl_buffered_write.
1021 1.54 hannken */
1022 1.54 hannken static int
1023 1.95 jdolecek wapbl_buffered_flush(struct wapbl *wl, bool full)
1024 1.54 hannken {
1025 1.95 jdolecek int error = 0;
1026 1.95 jdolecek struct buf *bp, *bnext;
1027 1.95 jdolecek bool only_done = true, found = false;
1028 1.95 jdolecek
1029 1.95 jdolecek /* if there is outstanding buffered write, send it now */
1030 1.95 jdolecek if ((bp = TAILQ_FIRST(&wl->wl_iobufs)) && bp->b_resid > 0)
1031 1.95 jdolecek wapbl_buffered_write_async(wl, bp);
1032 1.95 jdolecek
1033 1.95 jdolecek /* wait for I/O to complete */
1034 1.95 jdolecek again:
1035 1.95 jdolecek TAILQ_FOREACH_SAFE(bp, &wl->wl_iobufs_busy, b_wapbllist, bnext) {
1036 1.95 jdolecek if (!full && only_done) {
1037 1.95 jdolecek /* skip unfinished */
1038 1.95 jdolecek if (!ISSET(bp->b_oflags, BO_DONE))
1039 1.95 jdolecek continue;
1040 1.95 jdolecek }
1041 1.114 riastrad
1042 1.95 jdolecek if (ISSET(bp->b_oflags, BO_DONE))
1043 1.95 jdolecek wl->wl_ev_jbufs_bio_nowait.ev_count++;
1044 1.95 jdolecek
1045 1.95 jdolecek TAILQ_REMOVE(&wl->wl_iobufs_busy, bp, b_wapbllist);
1046 1.95 jdolecek error = biowait(bp);
1047 1.54 hannken
1048 1.95 jdolecek /* reset for reuse */
1049 1.101 jdolecek bp->b_blkno = bp->b_resid = bp->b_flags = 0;
1050 1.95 jdolecek TAILQ_INSERT_TAIL(&wl->wl_iobufs, bp, b_wapbllist);
1051 1.95 jdolecek found = true;
1052 1.54 hannken
1053 1.95 jdolecek if (!full)
1054 1.95 jdolecek break;
1055 1.95 jdolecek }
1056 1.54 hannken
1057 1.95 jdolecek if (!found && only_done && !TAILQ_EMPTY(&wl->wl_iobufs_busy)) {
1058 1.95 jdolecek only_done = false;
1059 1.95 jdolecek goto again;
1060 1.95 jdolecek }
1061 1.87 jdolecek
1062 1.54 hannken return error;
1063 1.54 hannken }

/*
 * wapbl_buffered_write(data, len, wl, pbn)
 *
 *	Write len bytes from data to physical block pbn on
 *	wl->wl_devvp.  The write may not complete until
 *	wapbl_buffered_flush.
 */
static int
wapbl_buffered_write(void *data, size_t len, struct wapbl *wl, daddr_t pbn,
    int bflags)
{
	size_t resid;
	struct buf *bp;

again:
	bp = TAILQ_FIRST(&wl->wl_iobufs);

	if (bp == NULL) {
		/* No more buffers, wait for any previous I/O to finish. */
		wapbl_buffered_flush(wl, false);

		bp = TAILQ_FIRST(&wl->wl_iobufs);
		KASSERT(bp != NULL);
	}

	/*
	 * If not adjacent to buffered data flush first.  Disk block
	 * address is always valid for non-empty buffer.
	 */
	if (bp->b_resid > 0 && pbn != bp->b_blkno + btodb(bp->b_resid)) {
		wapbl_buffered_write_async(wl, bp);
		goto again;
	}

	/*
	 * If this write goes to an empty buffer we have to
	 * save the disk block address first.
	 */
	if (bp->b_blkno == 0) {
		bp->b_blkno = pbn;
		bp->b_flags |= bflags;
	}

	/*
	 * Remaining space so this buffer ends on a buffer size boundary.
	 *
	 * Cannot become less or equal zero as the buffer would have been
	 * flushed on the last call then.
	 */
	resid = bp->b_bufsize - dbtob(bp->b_blkno % btodb(bp->b_bufsize)) -
	    bp->b_resid;
	KASSERT(resid > 0);
	KASSERT(dbtob(btodb(resid)) == resid);
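	/*
	 * Illustrative example (assumes MAXPHYS-sized iobufs of 64 KiB
	 * and DEV_BSIZE of 512, which this file does not guarantee):
	 * with b_bufsize = 65536, b_blkno = 192 (byte address 98304)
	 * and b_resid = 4096 bytes already buffered, resid is
	 * 65536 - 32768 - 4096 = 28672, i.e. the room left before the
	 * buffered run reaches the next 64 KiB device-address boundary
	 * at byte 131072.
	 */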

	if (len < resid)
		resid = len;

	memcpy((uint8_t *)bp->b_data + bp->b_resid, data, resid);
	bp->b_resid += resid;

	if (len >= resid) {
		/* Just filled the buf, or data did not fit */
		wapbl_buffered_write_async(wl, bp);

		data = (uint8_t *)data + resid;
		len -= resid;
		pbn += btodb(resid);

		if (len > 0)
			goto again;
	}

	return 0;
}

/*
 * wapbl_circ_write(wl, data, len, offp)
 *
 *	Write len bytes from data to the circular queue of wl, starting
 *	at linear byte offset *offp, and returning the new linear byte
 *	offset in *offp.
 *
 *	If the starting linear byte offset precedes wl->wl_circ_off,
 *	the write instead begins at wl->wl_circ_off.  XXX WTF?  This
 *	should be a KASSERT, not a conditional.
 *
 *	The write is buffered in wl and must be flushed with
 *	wapbl_buffered_flush before it will be submitted to the disk.
 */
static int
wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;
	daddr_t pbn;

	KDASSERT(((len >> wl->wl_log_dev_bshift) << wl->wl_log_dev_bshift) ==
	    len);

	if (off < wl->wl_circ_off)
		off = wl->wl_circ_off;
	slen = wl->wl_circ_off + wl->wl_circ_size - off;
	if (slen < len) {
		pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
#ifdef _KERNEL
		pbn = btodb(pbn << wl->wl_log_dev_bshift);
#endif
		error = wapbl_buffered_write(data, slen, wl, pbn,
		    WAPBL_JDATA_FLAGS(wl));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wl->wl_circ_off;
	}
	pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
#ifdef _KERNEL
	pbn = btodb(pbn << wl->wl_log_dev_bshift);
#endif
	error = wapbl_buffered_write(data, len, wl, pbn,
	    WAPBL_JDATA_FLAGS(wl));
	if (error)
		return error;
	off += len;
	if (off >= wl->wl_circ_off + wl->wl_circ_size)
		off = wl->wl_circ_off;
	*offp = off;
	return 0;
}
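
/*
 * Illustrative wrap-around example (assumed numbers, not derived from
 * this file, with a 512-byte log device block size): with
 * wl_circ_off = 1024 and wl_circ_size = 7168 the queue occupies linear
 * offsets [1024, 8192).  A call with *offp = 7680 and len = 1024 writes
 * 512 bytes at offset 7680, wraps, writes the remaining 512 bytes at
 * offset 1024, and returns *offp = 1536.
 */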

/****************************************************************/
/*
 * WAPBL transactions: entering, adding/removing bufs, and exiting
 */

int
wapbl_begin(struct wapbl *wl, const char *file, int line)
{
	int doflush;
	unsigned lockcount;

	KDASSERT(wl);

	/*
	 * XXX this needs to be made much more sophisticated.
	 * perhaps each wapbl_begin could reserve a specified
	 * number of buffers and bytes.
	 */
	mutex_enter(&wl->wl_mtx);
	lockcount = wl->wl_lock_count;
	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
	    wl->wl_bufbytes_max / 2) ||
	    ((wl->wl_bufcount + (lockcount * 10)) >
	    wl->wl_bufcount_max / 2) ||
	    (wapbl_transaction_len(wl) > wl->wl_circ_size / 2) ||
	    (wl->wl_dealloccnt >= (wl->wl_dealloclim / 2));
	mutex_exit(&wl->wl_mtx);

	if (doflush) {
		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
		    ("force flush lockcnt=%d bufbytes=%zu "
		    "(max=%zu) bufcount=%zu (max=%zu) "
		    "dealloccnt %d (lim=%d)\n",
		    lockcount, wl->wl_bufbytes,
		    wl->wl_bufbytes_max, wl->wl_bufcount,
		    wl->wl_bufcount_max,
		    wl->wl_dealloccnt, wl->wl_dealloclim));
	}

	if (doflush) {
		int error = wapbl_flush(wl, 0);
		if (error)
			return error;
	}

	rw_enter(&wl->wl_rwlock, RW_READER);
	mutex_enter(&wl->wl_mtx);
	wl->wl_lock_count++;
	mutex_exit(&wl->wl_mtx);

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_begin thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu at %s:%d\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount, file, line));
#endif

	return 0;
}

void
wapbl_end(struct wapbl *wl)
{

#if defined(WAPBL_DEBUG_PRINT)
	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
	    ("wapbl_end thread %d.%d with bufcount=%zu "
	    "bufbytes=%zu bcount=%zu\n",
	    curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
	    wl->wl_bufbytes, wl->wl_bcount));
#endif

	/*
	 * XXX this could be handled more gracefully, perhaps place
	 * only a partial transaction in the log and allow the
	 * remaining to flush without the protection of the journal.
	 */
	KASSERTMSG((wapbl_transaction_len(wl) <=
	    (wl->wl_circ_size - wl->wl_reserved_bytes)),
	    "wapbl_end: current transaction too big to flush");

	mutex_enter(&wl->wl_mtx);
	KASSERT(wl->wl_lock_count > 0);
	wl->wl_lock_count--;
	mutex_exit(&wl->wl_mtx);

	rw_exit(&wl->wl_rwlock);
}

void
wapbl_add_buf(struct wapbl *wl, struct buf * bp)
{

	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(bp->b_vp);

	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1702
	 *
	 * XXX2 why require it then?  leap of semantics?
	 */
	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
#endif

	mutex_enter(&wl->wl_mtx);
	if (bp->b_flags & B_LOCKED) {
		TAILQ_REMOVE(&wl->wl_bufs, bp, b_wapbllist);
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp,
		    bp->b_bufsize, bp->b_bcount));
	} else {
		/* unlocked but dirty buffers shouldn't exist */
		KASSERT(!(bp->b_oflags & BO_DELWRI));
		wl->wl_bufbytes += bp->b_bufsize;
		wl->wl_bcount += bp->b_bcount;
		wl->wl_bufcount++;
		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
		    ("wapbl_add_buf thread %d.%d adding buf %p "
		    "with %d bytes %d bcount\n",
		    curproc->p_pid, curlwp->l_lid, bp,
		    bp->b_bufsize, bp->b_bcount));
	}
	TAILQ_INSERT_TAIL(&wl->wl_bufs, bp, b_wapbllist);
	mutex_exit(&wl->wl_mtx);

	bp->b_flags |= B_LOCKED;
}

static void
wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
{

	KASSERT(mutex_owned(&wl->wl_mtx));
	KASSERT(bp->b_cflags & BC_BUSY);
	wapbl_jlock_assert(wl);

#if 0
	/*
	 * XXX this might be an issue for swapfiles.
	 * see uvm_swap.c:1725
	 *
	 * XXXdeux: see above
	 */
	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
#endif
	KASSERT(bp->b_flags & B_LOCKED);

	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
	    ("wapbl_remove_buf thread %d.%d removing buf %p with "
	    "%d bytes %d bcount\n",
	    curproc->p_pid, curlwp->l_lid, bp,
	    bp->b_bufsize, bp->b_bcount));

	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
	wl->wl_bufbytes -= bp->b_bufsize;
	KASSERT(wl->wl_bcount >= bp->b_bcount);
	wl->wl_bcount -= bp->b_bcount;
	KASSERT(wl->wl_bufcount > 0);
	wl->wl_bufcount--;
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
	TAILQ_REMOVE(&wl->wl_bufs, bp, b_wapbllist);

	bp->b_flags &= ~B_LOCKED;
}

/* called from brelsel() in vfs_bio among other places */
void
wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
{

	mutex_enter(&wl->wl_mtx);
	wapbl_remove_buf_locked(wl, bp);
	mutex_exit(&wl->wl_mtx);
}

void
wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
{

	KASSERT(bp->b_cflags & BC_BUSY);

	/*
	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
	 * is not for a transaction?  if so, why is this called in the
	 * first place?
	 */
	if (bp->b_flags & B_LOCKED) {
		mutex_enter(&wl->wl_mtx);
		wl->wl_bufbytes += bp->b_bufsize - oldsz;
		wl->wl_bcount += bp->b_bcount - oldcnt;
		mutex_exit(&wl->wl_mtx);
	}
}

#endif /* _KERNEL */
1399 1.2 simonb
1400 1.2 simonb /****************************************************************/
1401 1.2 simonb /* Some utility inlines */
1402 1.2 simonb
1403 1.71 riastrad /*
1404 1.71 riastrad * wapbl_space_used(avail, head, tail)
1405 1.71 riastrad *
1406 1.71 riastrad * Number of bytes used in a circular queue of avail total bytes,
1407 1.71 riastrad * from tail to head.
1408 1.71 riastrad */
1409 1.56 joerg static inline size_t
1410 1.56 joerg wapbl_space_used(size_t avail, off_t head, off_t tail)
1411 1.56 joerg {
1412 1.56 joerg
1413 1.56 joerg if (tail == 0) {
1414 1.56 joerg KASSERT(head == 0);
1415 1.56 joerg return 0;
1416 1.56 joerg }
1417 1.56 joerg return ((head + (avail - 1) - tail) % avail) + 1;
1418 1.56 joerg }
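
/*
 * Illustrative sketch, compiled out: a few concrete instances of the
 * accounting above.  The value avail = 100 is a hypothetical stand-in
 * for wl_circ_size; tail == 0 is the canonical "empty" encoding.
 */
#if 0
static void
wapbl_space_used_example(void)
{
	const size_t avail = 100;

	KASSERT(wapbl_space_used(avail, 0, 0) == 0);	   /* empty queue */
	KASSERT(wapbl_space_used(avail, 30, 10) == 20);	   /* no wrap: 10..30 */
	KASSERT(wapbl_space_used(avail, 30, 80) == 50);	   /* wraps past the end */
	KASSERT(wapbl_space_used(avail, 42, 42) == avail); /* head == tail != 0: full */
}
#endif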
1419 1.56 joerg
1420 1.56 joerg #ifdef _KERNEL
1421 1.71 riastrad /*
1422 1.71 riastrad * wapbl_advance(size, off, oldoff, delta)
1423 1.71 riastrad *
1424 1.71 riastrad * Given a byte offset oldoff into a circular queue of size bytes
1425 1.71 riastrad * starting at off, return a new byte offset oldoff + delta into
1426 1.71 riastrad * the circular queue.
1427 1.71 riastrad */
1428 1.30 uebayasi static inline off_t
1429 1.60 matt wapbl_advance(size_t size, size_t off, off_t oldoff, size_t delta)
1430 1.2 simonb {
1431 1.60 matt off_t newoff;
1432 1.2 simonb
1433 1.2 simonb /* Define acceptable ranges for inputs. */
1434 1.46 christos KASSERT(delta <= (size_t)size);
1435 1.114 riastrad KASSERT(oldoff == 0 || (size_t)oldoff >= off);
1436 1.60 matt KASSERT(oldoff < (off_t)(size + off));
1437 1.2 simonb
1438 1.114 riastrad if (oldoff == 0 && delta != 0)
1439 1.60 matt newoff = off + delta;
1440 1.114 riastrad else if (oldoff + delta < size + off)
1441 1.60 matt newoff = oldoff + delta;
1442 1.2 simonb else
1443 1.60 matt newoff = (oldoff + delta) - size;
1444 1.2 simonb
1445 1.2 simonb /* Note some interesting axioms */
1446 1.114 riastrad KASSERT(delta != 0 || newoff == oldoff);
1447 1.114 riastrad KASSERT(delta == 0 || newoff != 0);
1448 1.114 riastrad KASSERT(delta != size || newoff == oldoff);
1449 1.2 simonb
1450 1.2 simonb /* Define acceptable ranges for output. */
1451 1.114 riastrad KASSERT(newoff == 0 || (size_t)newoff >= off);
1452 1.114 riastrad KASSERT((size_t)newoff < size + off);
1453 1.60 matt return newoff;
1454 1.2 simonb }
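
/*
 * Illustrative sketch, compiled out: how wapbl_advance() wraps an offset
 * around the circular log.  size = 100 and off = 4 are hypothetical
 * stand-ins for wl_circ_size and wl_circ_off, so offsets live in [4, 104).
 */
#if 0
static void
wapbl_advance_example(void)
{
	const size_t size = 100, off = 4;

	KASSERT(wapbl_advance(size, off, 0, 10) == 14);	   /* leaving "empty" */
	KASSERT(wapbl_advance(size, off, 50, 10) == 60);   /* plain advance */
	KASSERT(wapbl_advance(size, off, 90, 30) == 20);   /* wraps past 104 */
	KASSERT(wapbl_advance(size, off, 50, size) == 50); /* full lap: unchanged */
}
#endif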
1455 1.2 simonb
1456 1.71 riastrad /*
1457 1.71 riastrad * wapbl_space_free(avail, head, tail)
1458 1.71 riastrad *
1459 1.71 riastrad * Number of bytes free in a circular queue of avail total bytes,
1460 1.71 riastrad * in which everything from tail to head is used.
1461 1.71 riastrad */
1462 1.30 uebayasi static inline size_t
1463 1.2 simonb wapbl_space_free(size_t avail, off_t head, off_t tail)
1464 1.2 simonb {
1465 1.2 simonb
1466 1.2 simonb return avail - wapbl_space_used(avail, head, tail);
1467 1.2 simonb }
1468 1.2 simonb
1469 1.71 riastrad /*
1470 1.71 riastrad * wapbl_advance_head(size, off, delta, headp, tailp)
1471 1.71 riastrad *
1472 1.71 riastrad * In a circular queue of size bytes starting at off, given the
1473 1.71 riastrad * old head and tail offsets *headp and *tailp, store the new head
1474 1.71 riastrad * and tail offsets in *headp and *tailp resulting from adding
1475 1.71 riastrad * delta bytes of data to the head.
1476 1.71 riastrad */
1477 1.30 uebayasi static inline void
1478 1.2 simonb wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
1479 1.114 riastrad off_t *tailp)
1480 1.2 simonb {
1481 1.2 simonb off_t head = *headp;
1482 1.2 simonb off_t tail = *tailp;
1483 1.2 simonb
1484 1.2 simonb KASSERT(delta <= wapbl_space_free(size, head, tail));
1485 1.2 simonb head = wapbl_advance(size, off, head, delta);
1486 1.114 riastrad if (tail == 0 && head != 0)
1487 1.2 simonb tail = off;
1488 1.2 simonb *headp = head;
1489 1.2 simonb *tailp = tail;
1490 1.2 simonb }
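
/*
 * Illustrative sketch, compiled out: appending 30 bytes to an empty queue
 * moves the head and pins the tail at the start of the circular region.
 * size = 100, off = 4 are hypothetical.
 */
#if 0
static void
wapbl_advance_head_example(void)
{
	off_t head = 0, tail = 0;	/* 0/0 encodes the empty queue */

	wapbl_advance_head(100, 4, 30, &head, &tail);
	KASSERT(head == 34 && tail == 4);
	KASSERT(wapbl_space_used(100, head, tail) == 30);
}
#endif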
1491 1.2 simonb
1492 1.71 riastrad /*
1493 1.71 riastrad * wapbl_advance_tail(size, off, delta, headp, tailp)
1494 1.71 riastrad *
1495 1.71 riastrad * In a circular queue of size bytes starting at off, given the
1496 1.71 riastrad * old head and tail offsets *headp and *tailp, store the new head
1497 1.71 riastrad * and tail offsets in *headp and *tailp resulting from removing
1498 1.71 riastrad * delta bytes of data from the tail.
1499 1.71 riastrad */
1500 1.30 uebayasi static inline void
1501 1.2 simonb wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
1502 1.114 riastrad off_t *tailp)
1503 1.2 simonb {
1504 1.2 simonb off_t head = *headp;
1505 1.2 simonb off_t tail = *tailp;
1506 1.2 simonb
1507 1.2 simonb KASSERT(delta <= wapbl_space_used(size, head, tail));
1508 1.2 simonb tail = wapbl_advance(size, off, tail, delta);
1509 1.2 simonb if (head == tail) {
1510 1.2 simonb head = tail = 0;
1511 1.2 simonb }
1512 1.2 simonb *headp = head;
1513 1.2 simonb *tailp = tail;
1514 1.2 simonb }
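
/*
 * Illustrative sketch, compiled out: consuming the remaining 30 bytes lets
 * the tail catch up with the head, which collapses the queue back to the
 * canonical empty encoding 0/0.  Continues the hypothetical state above.
 */
#if 0
static void
wapbl_advance_tail_example(void)
{
	off_t head = 34, tail = 4;	/* state left by the head sketch */

	wapbl_advance_tail(100, 4, 30, &head, &tail);
	KASSERT(head == 0 && tail == 0);	/* empty again */
}
#endif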
1515 1.2 simonb
1516 1.2 simonb
1517 1.2 simonb /****************************************************************/
1518 1.2 simonb
1519 1.2 simonb /*
1520 1.73 riastrad * wapbl_truncate(wl, minfree)
1521 1.71 riastrad *
1522 1.71 riastrad * Wait until at least minfree bytes are available in the log.
1523 1.71 riastrad *
1524 1.73 riastrad * If it was necessary to wait for writes to complete,
1525 1.73 riastrad * advance the circular queue tail to reflect the new write
1526 1.73 riastrad * completions and issue a write commit to the log.
1527 1.71 riastrad *
1528 1.71 riastrad * => Caller must hold wl->wl_rwlock writer lock.
1529 1.2 simonb */
1530 1.2 simonb static int
1531 1.73 riastrad wapbl_truncate(struct wapbl *wl, size_t minfree)
1532 1.2 simonb {
1533 1.2 simonb size_t delta;
1534 1.2 simonb size_t avail;
1535 1.2 simonb off_t head;
1536 1.2 simonb off_t tail;
1537 1.2 simonb int error = 0;
1538 1.2 simonb
1539 1.2 simonb KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
1540 1.2 simonb KASSERT(rw_write_held(&wl->wl_rwlock));
1541 1.2 simonb
1542 1.2 simonb mutex_enter(&wl->wl_mtx);
1543 1.2 simonb
1544 1.2 simonb /*
1545 1.2 simonb * First check to see if we have to do a commit
1546 1.2 simonb * at all.
1547 1.2 simonb */
1548 1.2 simonb avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
1549 1.2 simonb if (minfree < avail) {
1550 1.2 simonb mutex_exit(&wl->wl_mtx);
1551 1.2 simonb return 0;
1552 1.2 simonb }
1553 1.2 simonb minfree -= avail;
1554 1.114 riastrad while (wl->wl_error_count == 0 &&
1555 1.114 riastrad wl->wl_reclaimable_bytes < minfree) {
1556 1.115 riastrad WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1557 1.114 riastrad ("wapbl_truncate: sleeping on %p"
1558 1.114 riastrad " wl=%p bytes=%zd minfree=%zd\n",
1559 1.114 riastrad &wl->wl_reclaimable_bytes,
1560 1.114 riastrad wl, wl->wl_reclaimable_bytes, minfree));
1561 1.2 simonb cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
1562 1.2 simonb }
1563 1.2 simonb if (wl->wl_reclaimable_bytes < minfree) {
1564 1.2 simonb KASSERT(wl->wl_error_count);
1565 1.2 simonb /* XXX maybe get actual error from buffer instead someday? */
1566 1.116 riastrad error = SET_ERROR(EIO);
1567 1.2 simonb }
1568 1.2 simonb head = wl->wl_head;
1569 1.2 simonb tail = wl->wl_tail;
1570 1.2 simonb delta = wl->wl_reclaimable_bytes;
1571 1.2 simonb
1572 1.113 msaitoh /* If all of the entries are flushed, then be sure to keep
1573 1.2 simonb * the reserved bytes reserved. Watch out for discarded transactions,
1574 1.2 simonb * which could leave more bytes reserved than are reclaimable.
1575 1.2 simonb */
1576 1.114 riastrad if (SIMPLEQ_EMPTY(&wl->wl_entries) && delta >= wl->wl_reserved_bytes) {
1577 1.2 simonb delta -= wl->wl_reserved_bytes;
1578 1.2 simonb }
1579 1.2 simonb wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
1580 1.114 riastrad &tail);
1581 1.2 simonb KDASSERT(wl->wl_reserved_bytes <=
1582 1.114 riastrad wapbl_space_used(wl->wl_circ_size, head, tail));
1583 1.2 simonb mutex_exit(&wl->wl_mtx);
1584 1.2 simonb
1585 1.2 simonb if (error)
1586 1.2 simonb return error;
1587 1.2 simonb
1588 1.2 simonb /*
1589 1.2 simonb * This is where head, tail and delta are unprotected
1590 1.2 simonb * from races against itself or flush. This is ok since
1591 1.2 simonb * we only call this routine from inside flush itself.
1592 1.2 simonb *
1593 1.2 simonb * XXX: how can it race against itself when accessed only
1594 1.2 simonb * from behind the write-locked rwlock?
1595 1.2 simonb */
1596 1.2 simonb error = wapbl_write_commit(wl, head, tail);
1597 1.2 simonb if (error)
1598 1.2 simonb return error;
1599 1.2 simonb
1600 1.2 simonb wl->wl_head = head;
1601 1.2 simonb wl->wl_tail = tail;
1602 1.2 simonb
1603 1.2 simonb mutex_enter(&wl->wl_mtx);
1604 1.2 simonb KASSERT(wl->wl_reclaimable_bytes >= delta);
1605 1.2 simonb wl->wl_reclaimable_bytes -= delta;
1606 1.2 simonb mutex_exit(&wl->wl_mtx);
1607 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1608 1.2 simonb ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
1609 1.114 riastrad curproc->p_pid, curlwp->l_lid, delta));
1610 1.2 simonb
1611 1.2 simonb return 0;
1612 1.2 simonb }
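
/*
 * Illustrative sketch, compiled out: the caller pattern used by
 * wapbl_flush() below -- size the pending transaction, then wait until
 * the log has room before writing it.  Returning EFBIG here instead of
 * panicking is a hypothetical choice for the sketch only.
 */
#if 0
static int
wapbl_make_room_example(struct wapbl *wl)
{
	size_t flushsize;

	KASSERT(rw_write_held(&wl->wl_rwlock));

	flushsize = wapbl_transaction_len(wl);
	if (flushsize > wl->wl_circ_size - wl->wl_reserved_bytes)
		return SET_ERROR(EFBIG);	/* can never fit */

	return wapbl_truncate(wl, flushsize);	/* may sleep on log I/O */
}
#endif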
1613 1.2 simonb
1614 1.2 simonb /****************************************************************/
1615 1.2 simonb
1616 1.2 simonb void
1617 1.2 simonb wapbl_biodone(struct buf *bp)
1618 1.2 simonb {
1619 1.2 simonb struct wapbl_entry *we = bp->b_private;
1620 1.107 jdolecek struct wapbl *wl;
1621 1.53 hannken #ifdef WAPBL_DEBUG_BUFBYTES
1622 1.53 hannken const int bufsize = bp->b_bufsize;
1623 1.53 hannken #endif
1624 1.2 simonb
1625 1.107 jdolecek mutex_enter(&bufcache_lock);
1626 1.107 jdolecek wl = we->we_wapbl;
1627 1.107 jdolecek mutex_exit(&bufcache_lock);
1628 1.107 jdolecek
1629 1.2 simonb /*
1630 1.2 simonb * Handle possible flushing of buffers after log has been
1631 1.2 simonb 	 * decommissioned.
1632 1.2 simonb */
1633 1.2 simonb if (!wl) {
1634 1.2 simonb KASSERT(we->we_bufcount > 0);
1635 1.2 simonb we->we_bufcount--;
1636 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1637 1.53 hannken KASSERT(we->we_unsynced_bufbytes >= bufsize);
1638 1.53 hannken we->we_unsynced_bufbytes -= bufsize;
1639 1.2 simonb #endif
1640 1.2 simonb
1641 1.2 simonb if (we->we_bufcount == 0) {
1642 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1643 1.2 simonb KASSERT(we->we_unsynced_bufbytes == 0);
1644 1.2 simonb #endif
1645 1.51 para pool_put(&wapbl_entry_pool, we);
1646 1.2 simonb }
1647 1.2 simonb
1648 1.2 simonb brelse(bp, 0);
1649 1.2 simonb return;
1650 1.2 simonb }
1651 1.2 simonb
1652 1.2 simonb #ifdef ohbother
1653 1.44 uebayasi KDASSERT(bp->b_oflags & BO_DONE);
1654 1.44 uebayasi KDASSERT(!(bp->b_oflags & BO_DELWRI));
1655 1.2 simonb KDASSERT(bp->b_flags & B_ASYNC);
1656 1.44 uebayasi KDASSERT(bp->b_cflags & BC_BUSY);
1657 1.2 simonb KDASSERT(!(bp->b_flags & B_LOCKED));
1658 1.2 simonb KDASSERT(!(bp->b_flags & B_READ));
1659 1.44 uebayasi KDASSERT(!(bp->b_cflags & BC_INVAL));
1660 1.44 uebayasi KDASSERT(!(bp->b_cflags & BC_NOCACHE));
1661 1.2 simonb #endif
1662 1.2 simonb
1663 1.2 simonb if (bp->b_error) {
1664 1.26 apb /*
1665 1.78 riastrad * If an error occurs, it would be nice to leave the buffer
1666 1.78 riastrad * as a delayed write on the LRU queue so that we can retry
1667 1.78 riastrad * it later. But buffercache(9) can't handle dirty buffer
1668 1.78 riastrad * reuse, so just mark the log permanently errored out.
1669 1.26 apb */
1670 1.2 simonb mutex_enter(&wl->wl_mtx);
1671 1.2 simonb if (wl->wl_error_count == 0) {
1672 1.2 simonb wl->wl_error_count++;
1673 1.2 simonb cv_broadcast(&wl->wl_reclaimable_cv);
1674 1.2 simonb }
1675 1.2 simonb mutex_exit(&wl->wl_mtx);
1676 1.2 simonb }
1677 1.2 simonb
1678 1.53 hannken /*
1679 1.93 jdolecek * Make sure that the buf doesn't retain the media flags, so that
1680 1.93 jdolecek * e.g. wapbl_allow_fuadpo has immediate effect on any following I/O.
1681 1.93 jdolecek * The flags will be set again if needed by another I/O.
1682 1.93 jdolecek */
1683 1.93 jdolecek bp->b_flags &= ~B_MEDIA_FLAGS;
1684 1.93 jdolecek
1685 1.93 jdolecek /*
1686 1.53 hannken * Release the buffer here. wapbl_flush() may wait for the
1687 1.53 hannken * log to become empty and we better unbusy the buffer before
1688 1.53 hannken * wapbl_flush() returns.
1689 1.53 hannken */
1690 1.53 hannken brelse(bp, 0);
1691 1.53 hannken
1692 1.2 simonb mutex_enter(&wl->wl_mtx);
1693 1.2 simonb
1694 1.2 simonb KASSERT(we->we_bufcount > 0);
1695 1.2 simonb we->we_bufcount--;
1696 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1697 1.53 hannken KASSERT(we->we_unsynced_bufbytes >= bufsize);
1698 1.53 hannken we->we_unsynced_bufbytes -= bufsize;
1699 1.53 hannken KASSERT(wl->wl_unsynced_bufbytes >= bufsize);
1700 1.53 hannken wl->wl_unsynced_bufbytes -= bufsize;
1701 1.2 simonb #endif
1702 1.87 jdolecek wl->wl_ev_metawrite.ev_count++;
1703 1.2 simonb
1704 1.2 simonb /*
1705 1.2 simonb * If the current transaction can be reclaimed, start
1706 1.2 simonb * at the beginning and reclaim any consecutive reclaimable
1707 1.2 simonb * transactions. If we successfully reclaim anything,
1708 1.2 simonb 	 * then wake up anyone waiting for the reclaim.
1709 1.2 simonb */
1710 1.2 simonb if (we->we_bufcount == 0) {
1711 1.2 simonb size_t delta = 0;
1712 1.2 simonb int errcnt = 0;
1713 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1714 1.2 simonb KDASSERT(we->we_unsynced_bufbytes == 0);
1715 1.2 simonb #endif
1716 1.2 simonb /*
1717 1.2 simonb 		 * clear any posted error, since the buffer it came from
1718 1.2 simonb 		 * has been successfully flushed by now
1719 1.2 simonb */
1720 1.2 simonb while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
1721 1.114 riastrad we->we_bufcount == 0) {
1722 1.2 simonb delta += we->we_reclaimable_bytes;
1723 1.2 simonb if (we->we_error)
1724 1.2 simonb errcnt++;
1725 1.2 simonb SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
1726 1.51 para pool_put(&wapbl_entry_pool, we);
1727 1.2 simonb }
1728 1.2 simonb
1729 1.2 simonb if (delta) {
1730 1.2 simonb wl->wl_reclaimable_bytes += delta;
1731 1.2 simonb KASSERT(wl->wl_error_count >= errcnt);
1732 1.2 simonb wl->wl_error_count -= errcnt;
1733 1.2 simonb cv_broadcast(&wl->wl_reclaimable_cv);
1734 1.2 simonb }
1735 1.2 simonb }
1736 1.2 simonb
1737 1.2 simonb mutex_exit(&wl->wl_mtx);
1738 1.2 simonb }
1739 1.2 simonb
1740 1.2 simonb /*
1741 1.71 riastrad * wapbl_flush(wl, wait)
1742 1.71 riastrad *
1743 1.71 riastrad * Flush pending block writes, deallocations, and inodes from
1744 1.71 riastrad * the current transaction in memory to the log on disk:
1745 1.71 riastrad *
1746 1.71 riastrad * 1. Call the file system's wl_flush callback to flush any
1747 1.71 riastrad * per-file-system pending updates.
1748 1.71 riastrad * 2. Wait for enough space in the log for the current transaction.
1749 1.71 riastrad * 3. Synchronously write the new log records, advancing the
1750 1.71 riastrad * circular queue head.
1751 1.77 riastrad * 4. Issue the pending block writes asynchronously, now that they
1752 1.77 riastrad * are recorded in the log and can be replayed after crash.
1753 1.77 riastrad * 5. If wait is true, wait for all writes to complete and for the
1754 1.77 riastrad * log to become empty.
1755 1.71 riastrad *
1756 1.71 riastrad * On failure, call the file system's wl_flush_abort callback.
1757 1.2 simonb */
1758 1.2 simonb int
1759 1.2 simonb wapbl_flush(struct wapbl *wl, int waitfor)
1760 1.2 simonb {
1761 1.2 simonb struct buf *bp;
1762 1.2 simonb struct wapbl_entry *we;
1763 1.2 simonb off_t off;
1764 1.2 simonb off_t head;
1765 1.2 simonb off_t tail;
1766 1.2 simonb size_t delta = 0;
1767 1.2 simonb size_t flushsize;
1768 1.2 simonb size_t reserved;
1769 1.2 simonb int error = 0;
1770 1.2 simonb
1771 1.2 simonb /*
1772 1.2 simonb 	 * Do a quick check to see if a full flush can be skipped.
1773 1.2 simonb * This assumes that the flush callback does not need to be called
1774 1.2 simonb * unless there are other outstanding bufs.
1775 1.2 simonb */
1776 1.2 simonb if (!waitfor) {
1777 1.2 simonb size_t nbufs;
1778 1.2 simonb mutex_enter(&wl->wl_mtx); /* XXX need mutex here to
1779 1.2 simonb protect the KASSERTS */
1780 1.2 simonb nbufs = wl->wl_bufcount;
1781 1.2 simonb KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1782 1.2 simonb KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1783 1.2 simonb mutex_exit(&wl->wl_mtx);
1784 1.2 simonb if (nbufs == 0)
1785 1.2 simonb return 0;
1786 1.2 simonb }
1787 1.2 simonb
1788 1.2 simonb /*
1789 1.2 simonb * XXX we may consider using LK_UPGRADE here
1790 1.2 simonb * if we want to call flush from inside a transaction
1791 1.2 simonb */
1792 1.2 simonb rw_enter(&wl->wl_rwlock, RW_WRITER);
1793 1.86 jdolecek wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));
1794 1.2 simonb
1795 1.2 simonb /*
1796 1.75 riastrad * Now that we are exclusively locked and the file system has
1797 1.75 riastrad * issued any deferred block writes for this transaction, check
1798 1.75 riastrad * whether there are any blocks to write to the log. If not,
1799 1.75 riastrad * skip waiting for space or writing any log entries.
1800 1.75 riastrad *
1801 1.75 riastrad * XXX Shouldn't this also check wl_dealloccnt and
1802 1.75 riastrad * wl_inohashcnt? Perhaps wl_dealloccnt doesn't matter if the
1803 1.75 riastrad * file system didn't produce any blocks as a consequence of
1804 1.75 riastrad * it, but the same does not seem to be so of wl_inohashcnt.
1805 1.2 simonb */
1806 1.2 simonb if (wl->wl_bufcount == 0) {
1807 1.69 riastrad goto wait_out;
1808 1.2 simonb }
1809 1.2 simonb
1810 1.2 simonb #if 0
1811 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1812 1.114 riastrad ("wapbl_flush thread %d.%d flushing entries with "
1813 1.114 riastrad "bufcount=%zu bufbytes=%zu\n",
1814 1.114 riastrad curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1815 1.114 riastrad wl->wl_bufbytes));
1816 1.2 simonb #endif
1817 1.2 simonb
1818 1.2 simonb /* Calculate amount of space needed to flush */
1819 1.2 simonb flushsize = wapbl_transaction_len(wl);
1820 1.39 christos if (wapbl_verbose_commit) {
1821 1.39 christos struct timespec ts;
1822 1.39 christos getnanotime(&ts);
1823 1.43 nakayama printf("%s: %lld.%09ld this transaction = %zu bytes\n",
1824 1.39 christos __func__, (long long)ts.tv_sec,
1825 1.39 christos (long)ts.tv_nsec, flushsize);
1826 1.39 christos }
1827 1.2 simonb
1828 1.2 simonb if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1829 1.2 simonb /*
1830 1.2 simonb * XXX this could be handled more gracefully, perhaps place
1831 1.2 simonb * only a partial transaction in the log and allow the
1832 1.2 simonb * remaining to flush without the protection of the journal.
1833 1.2 simonb */
1834 1.66 riastrad panic("wapbl_flush: current transaction too big to flush");
1835 1.2 simonb }
1836 1.2 simonb
1837 1.73 riastrad error = wapbl_truncate(wl, flushsize);
1838 1.2 simonb if (error)
1839 1.69 riastrad goto out;
1840 1.2 simonb
1841 1.2 simonb off = wl->wl_head;
1842 1.114 riastrad KASSERT(off == 0 || off >= wl->wl_circ_off);
1843 1.114 riastrad KASSERT(off == 0 || off < wl->wl_circ_off + wl->wl_circ_size);
1844 1.2 simonb error = wapbl_write_blocks(wl, &off);
1845 1.2 simonb if (error)
1846 1.69 riastrad goto out;
1847 1.2 simonb error = wapbl_write_revocations(wl, &off);
1848 1.2 simonb if (error)
1849 1.69 riastrad goto out;
1850 1.2 simonb error = wapbl_write_inodes(wl, &off);
1851 1.2 simonb if (error)
1852 1.69 riastrad goto out;
1853 1.2 simonb
1854 1.2 simonb reserved = 0;
1855 1.2 simonb if (wl->wl_inohashcnt)
1856 1.2 simonb reserved = wapbl_transaction_inodes_len(wl);
1857 1.2 simonb
1858 1.2 simonb head = wl->wl_head;
1859 1.2 simonb tail = wl->wl_tail;
1860 1.2 simonb
1861 1.2 simonb wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1862 1.2 simonb &head, &tail);
1863 1.72 riastrad
1864 1.72 riastrad KASSERTMSG(head == off,
1865 1.72 riastrad "lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1866 1.72 riastrad " off=%"PRIdMAX" flush=%zu",
1867 1.72 riastrad (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1868 1.72 riastrad flushsize);
1869 1.2 simonb
1870 1.2 simonb /* Opportunistically move the tail forward if we can */
1871 1.73 riastrad mutex_enter(&wl->wl_mtx);
1872 1.73 riastrad delta = wl->wl_reclaimable_bytes;
1873 1.73 riastrad mutex_exit(&wl->wl_mtx);
1874 1.73 riastrad wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1875 1.73 riastrad &head, &tail);
1876 1.2 simonb
1877 1.2 simonb error = wapbl_write_commit(wl, head, tail);
1878 1.2 simonb if (error)
1879 1.69 riastrad goto out;
1880 1.2 simonb
1881 1.51 para we = pool_get(&wapbl_entry_pool, PR_WAITOK);
1882 1.2 simonb
1883 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1884 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1885 1.114 riastrad ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1886 1.114 riastrad " unsynced=%zu"
1887 1.114 riastrad "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1888 1.114 riastrad "inodes=%d\n",
1889 1.114 riastrad curproc->p_pid, curlwp->l_lid, flushsize, delta,
1890 1.114 riastrad wapbl_space_used(wl->wl_circ_size, head, tail),
1891 1.114 riastrad wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1892 1.114 riastrad wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1893 1.114 riastrad wl->wl_inohashcnt));
1894 1.2 simonb #else
1895 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1896 1.114 riastrad ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1897 1.114 riastrad "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1898 1.114 riastrad "inodes=%d\n",
1899 1.114 riastrad curproc->p_pid, curlwp->l_lid, flushsize, delta,
1900 1.114 riastrad wapbl_space_used(wl->wl_circ_size, head, tail),
1901 1.114 riastrad wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1902 1.114 riastrad wl->wl_dealloccnt, wl->wl_inohashcnt));
1903 1.2 simonb #endif
1904 1.2 simonb
1905 1.2 simonb
1906 1.2 simonb mutex_enter(&bufcache_lock);
1907 1.2 simonb mutex_enter(&wl->wl_mtx);
1908 1.2 simonb
1909 1.2 simonb wl->wl_reserved_bytes = reserved;
1910 1.2 simonb wl->wl_head = head;
1911 1.2 simonb wl->wl_tail = tail;
1912 1.2 simonb KASSERT(wl->wl_reclaimable_bytes >= delta);
1913 1.2 simonb wl->wl_reclaimable_bytes -= delta;
1914 1.81 jdolecek KDASSERT(wl->wl_dealloccnt == 0);
1915 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1916 1.2 simonb wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1917 1.2 simonb #endif
1918 1.2 simonb
1919 1.2 simonb we->we_wapbl = wl;
1920 1.2 simonb we->we_bufcount = wl->wl_bufcount;
1921 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1922 1.2 simonb we->we_unsynced_bufbytes = wl->wl_bufbytes;
1923 1.2 simonb #endif
1924 1.2 simonb we->we_reclaimable_bytes = flushsize;
1925 1.2 simonb we->we_error = 0;
1926 1.2 simonb SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1927 1.2 simonb
1928 1.2 simonb /*
1929 1.94 jdolecek 	 * This flushes bufs in the order they were queued, so the LRU
1930 1.94 jdolecek * order is preserved.
1931 1.2 simonb */
1932 1.94 jdolecek while ((bp = TAILQ_FIRST(&wl->wl_bufs)) != NULL) {
1933 1.2 simonb if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1934 1.2 simonb continue;
1935 1.2 simonb }
1936 1.2 simonb bp->b_iodone = wapbl_biodone;
1937 1.2 simonb bp->b_private = we;
1938 1.93 jdolecek
1939 1.2 simonb bremfree(bp);
1940 1.2 simonb wapbl_remove_buf_locked(wl, bp);
1941 1.2 simonb mutex_exit(&wl->wl_mtx);
1942 1.2 simonb mutex_exit(&bufcache_lock);
1943 1.2 simonb bawrite(bp);
1944 1.2 simonb mutex_enter(&bufcache_lock);
1945 1.2 simonb mutex_enter(&wl->wl_mtx);
1946 1.2 simonb }
1947 1.2 simonb mutex_exit(&wl->wl_mtx);
1948 1.2 simonb mutex_exit(&bufcache_lock);
1949 1.2 simonb
1950 1.2 simonb #if 0
1951 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1952 1.114 riastrad ("wapbl_flush thread %d.%d done flushing entries...\n",
1953 1.114 riastrad curproc->p_pid, curlwp->l_lid));
1954 1.2 simonb #endif
1955 1.2 simonb
1956 1.114 riastrad wait_out:
1957 1.2 simonb
1958 1.2 simonb /*
1959 1.2 simonb * If the waitfor flag is set, don't return until everything is
1960 1.2 simonb * fully flushed and the on disk log is empty.
1961 1.2 simonb */
1962 1.2 simonb if (waitfor) {
1963 1.91 riastrad error = wapbl_truncate(wl, wl->wl_circ_size -
1964 1.114 riastrad wl->wl_reserved_bytes);
1965 1.2 simonb }
1966 1.2 simonb
1967 1.114 riastrad out:
1968 1.2 simonb if (error) {
1969 1.81 jdolecek wl->wl_flush_abort(wl->wl_mount,
1970 1.86 jdolecek TAILQ_FIRST(&wl->wl_dealloclist));
1971 1.2 simonb }
1972 1.2 simonb
1973 1.2 simonb #ifdef WAPBL_DEBUG_PRINT
1974 1.2 simonb if (error) {
1975 1.2 simonb pid_t pid = -1;
1976 1.2 simonb lwpid_t lid = -1;
1977 1.2 simonb if (curproc)
1978 1.2 simonb pid = curproc->p_pid;
1979 1.2 simonb if (curlwp)
1980 1.2 simonb lid = curlwp->l_lid;
1981 1.2 simonb mutex_enter(&wl->wl_mtx);
1982 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
1983 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1984 1.2 simonb ("wapbl_flush: thread %d.%d aborted flush: "
1985 1.114 riastrad "error = %d\n"
1986 1.114 riastrad "\tbufcount=%zu bufbytes=%zu bcount=%zu "
1987 1.114 riastrad "deallocs=%d inodes=%d\n"
1988 1.114 riastrad "\terrcnt = %d, reclaimable=%zu reserved=%zu "
1989 1.114 riastrad "unsynced=%zu\n",
1990 1.114 riastrad pid, lid, error, wl->wl_bufcount,
1991 1.114 riastrad wl->wl_bufbytes, wl->wl_bcount,
1992 1.114 riastrad wl->wl_dealloccnt, wl->wl_inohashcnt,
1993 1.114 riastrad wl->wl_error_count, wl->wl_reclaimable_bytes,
1994 1.114 riastrad wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1995 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1996 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1997 1.2 simonb ("\tentry: bufcount = %zu, reclaimable = %zu, "
1998 1.114 riastrad "error = %d, unsynced = %zu\n",
1999 1.114 riastrad we->we_bufcount, we->we_reclaimable_bytes,
2000 1.114 riastrad we->we_error, we->we_unsynced_bufbytes));
2001 1.2 simonb }
2002 1.2 simonb #else
2003 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2004 1.2 simonb ("wapbl_flush: thread %d.%d aborted flush: "
2005 1.114 riastrad "error = %d\n"
2006 1.114 riastrad "\tbufcount=%zu bufbytes=%zu bcount=%zu "
2007 1.114 riastrad "deallocs=%d inodes=%d\n"
2008 1.114 riastrad "\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
2009 1.114 riastrad pid, lid, error, wl->wl_bufcount,
2010 1.114 riastrad wl->wl_bufbytes, wl->wl_bcount,
2011 1.114 riastrad wl->wl_dealloccnt, wl->wl_inohashcnt,
2012 1.114 riastrad wl->wl_error_count, wl->wl_reclaimable_bytes,
2013 1.114 riastrad wl->wl_reserved_bytes));
2014 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
2015 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2016 1.2 simonb ("\tentry: bufcount = %zu, reclaimable = %zu, "
2017 1.114 riastrad "error = %d\n", we->we_bufcount,
2018 1.114 riastrad we->we_reclaimable_bytes, we->we_error));
2019 1.2 simonb }
2020 1.2 simonb #endif
2021 1.2 simonb mutex_exit(&wl->wl_mtx);
2022 1.2 simonb }
2023 1.2 simonb #endif
2024 1.2 simonb
2025 1.2 simonb rw_exit(&wl->wl_rwlock);
2026 1.2 simonb return error;
2027 1.2 simonb }
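
/*
 * Illustrative sketch, compiled out: how a file system might drive the
 * log at sync or unmount time.  The helper name and the wait flag are
 * hypothetical; real callers live in the individual file systems.
 */
#if 0
static int
example_sync_log(struct wapbl *wl, bool wait)
{

	if (wl == NULL)
		return 0;		/* logging not enabled */

	/* Must not be called from inside a transaction. */
	wapbl_junlock_assert(wl);

	return wapbl_flush(wl, wait ? 1 : 0);
}
#endif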
2028 1.2 simonb
2029 1.2 simonb /****************************************************************/
2030 1.2 simonb
2031 1.2 simonb void
2032 1.2 simonb wapbl_jlock_assert(struct wapbl *wl)
2033 1.2 simonb {
2034 1.2 simonb
2035 1.23 ad KASSERT(rw_lock_held(&wl->wl_rwlock));
2036 1.2 simonb }
2037 1.2 simonb
2038 1.2 simonb void
2039 1.2 simonb wapbl_junlock_assert(struct wapbl *wl)
2040 1.2 simonb {
2041 1.2 simonb
2042 1.2 simonb KASSERT(!rw_write_held(&wl->wl_rwlock));
2043 1.2 simonb }
2044 1.2 simonb
2045 1.2 simonb /****************************************************************/
2046 1.2 simonb
2047 1.2 simonb /* locks missing */
2048 1.2 simonb void
2049 1.114 riastrad wapbl_print(struct wapbl *wl, int full, void (*pr)(const char *, ...))
2050 1.2 simonb {
2051 1.2 simonb struct buf *bp;
2052 1.2 simonb struct wapbl_entry *we;
2053 1.2 simonb (*pr)("wapbl %p", wl);
2054 1.2 simonb (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
2055 1.114 riastrad wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
2056 1.114 riastrad (*pr)("circ = %zu, header = %zu,"
2057 1.114 riastrad " head = %"PRIdMAX" tail = %"PRIdMAX"\n",
2058 1.114 riastrad wl->wl_circ_size, wl->wl_circ_off,
2059 1.114 riastrad (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
2060 1.2 simonb (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
2061 1.114 riastrad wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift);
2062 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
2063 1.2 simonb (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
2064 1.114 riastrad "reserved = %zu errcnt = %d unsynced = %zu\n",
2065 1.114 riastrad wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
2066 1.114 riastrad wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
2067 1.114 riastrad wl->wl_error_count, wl->wl_unsynced_bufbytes);
2068 1.2 simonb #else
2069 1.2 simonb (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
2070 1.114 riastrad "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
2071 1.114 riastrad wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
2072 1.114 riastrad wl->wl_error_count);
2073 1.2 simonb #endif
2074 1.2 simonb (*pr)("\tdealloccnt = %d, dealloclim = %d\n",
2075 1.114 riastrad wl->wl_dealloccnt, wl->wl_dealloclim);
2076 1.2 simonb (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
2077 1.114 riastrad wl->wl_inohashcnt, wl->wl_inohashmask);
2078 1.2 simonb (*pr)("entries:\n");
2079 1.2 simonb SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
2080 1.2 simonb #ifdef WAPBL_DEBUG_BUFBYTES
2081 1.2 simonb (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
2082 1.114 riastrad "unsynced = %zu\n",
2083 1.114 riastrad we->we_bufcount, we->we_reclaimable_bytes,
2084 1.114 riastrad we->we_error, we->we_unsynced_bufbytes);
2085 1.2 simonb #else
2086 1.2 simonb (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
2087 1.114 riastrad we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
2088 1.2 simonb #endif
2089 1.2 simonb }
2090 1.2 simonb if (full) {
2091 1.2 simonb int cnt = 0;
2092 1.2 simonb (*pr)("bufs =");
2093 1.94 jdolecek TAILQ_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
2094 1.94 jdolecek if (!TAILQ_NEXT(bp, b_wapbllist)) {
2095 1.2 simonb (*pr)(" %p", bp);
2096 1.2 simonb } else if ((++cnt % 6) == 0) {
2097 1.2 simonb (*pr)(" %p,\n\t", bp);
2098 1.2 simonb } else {
2099 1.2 simonb (*pr)(" %p,", bp);
2100 1.2 simonb }
2101 1.2 simonb }
2102 1.2 simonb (*pr)("\n");
2103 1.2 simonb
2104 1.2 simonb (*pr)("dealloced blks = ");
2105 1.2 simonb {
2106 1.81 jdolecek struct wapbl_dealloc *wd;
2107 1.2 simonb cnt = 0;
2108 1.86 jdolecek TAILQ_FOREACH(wd, &wl->wl_dealloclist, wd_entries) {
2109 1.2 simonb (*pr)(" %"PRId64":%d,",
2110 1.114 riastrad wd->wd_blkno,
2111 1.114 riastrad wd->wd_len);
2112 1.2 simonb if ((++cnt % 4) == 0) {
2113 1.2 simonb (*pr)("\n\t");
2114 1.2 simonb }
2115 1.2 simonb }
2116 1.2 simonb }
2117 1.2 simonb (*pr)("\n");
2118 1.2 simonb
2119 1.2 simonb (*pr)("registered inodes = ");
2120 1.2 simonb {
2121 1.2 simonb int i;
2122 1.2 simonb cnt = 0;
2123 1.2 simonb for (i = 0; i <= wl->wl_inohashmask; i++) {
2124 1.2 simonb struct wapbl_ino_head *wih;
2125 1.2 simonb struct wapbl_ino *wi;
2126 1.2 simonb
2127 1.2 simonb wih = &wl->wl_inohash[i];
2128 1.2 simonb LIST_FOREACH(wi, wih, wi_hash) {
2129 1.2 simonb if (wi->wi_ino == 0)
2130 1.2 simonb continue;
2131 1.55 christos (*pr)(" %"PRIu64"/0%06"PRIo32",",
2132 1.2 simonb wi->wi_ino, wi->wi_mode);
2133 1.2 simonb if ((++cnt % 4) == 0) {
2134 1.2 simonb (*pr)("\n\t");
2135 1.2 simonb }
2136 1.2 simonb }
2137 1.2 simonb }
2138 1.2 simonb (*pr)("\n");
2139 1.2 simonb }
2140 1.95 jdolecek
2141 1.95 jdolecek (*pr)("iobufs free =");
2142 1.95 jdolecek TAILQ_FOREACH(bp, &wl->wl_iobufs, b_wapbllist) {
2143 1.95 jdolecek if (!TAILQ_NEXT(bp, b_wapbllist)) {
2144 1.95 jdolecek (*pr)(" %p", bp);
2145 1.95 jdolecek } else if ((++cnt % 6) == 0) {
2146 1.95 jdolecek (*pr)(" %p,\n\t", bp);
2147 1.95 jdolecek } else {
2148 1.95 jdolecek (*pr)(" %p,", bp);
2149 1.95 jdolecek }
2150 1.95 jdolecek }
2151 1.95 jdolecek (*pr)("\n");
2152 1.95 jdolecek
2153 1.95 jdolecek (*pr)("iobufs busy =");
2154 1.95 jdolecek TAILQ_FOREACH(bp, &wl->wl_iobufs_busy, b_wapbllist) {
2155 1.95 jdolecek if (!TAILQ_NEXT(bp, b_wapbllist)) {
2156 1.95 jdolecek (*pr)(" %p", bp);
2157 1.95 jdolecek } else if ((++cnt % 6) == 0) {
2158 1.95 jdolecek (*pr)(" %p,\n\t", bp);
2159 1.95 jdolecek } else {
2160 1.95 jdolecek (*pr)(" %p,", bp);
2161 1.95 jdolecek }
2162 1.95 jdolecek }
2163 1.95 jdolecek (*pr)("\n");
2164 1.2 simonb }
2165 1.2 simonb }
2166 1.2 simonb
2167 1.2 simonb #if defined(WAPBL_DEBUG) || defined(DDB)
2168 1.2 simonb void
2169 1.2 simonb wapbl_dump(struct wapbl *wl)
2170 1.2 simonb {
2171 1.2 simonb #if defined(WAPBL_DEBUG)
2172 1.2 simonb if (!wl)
2173 1.2 simonb wl = wapbl_debug_wl;
2174 1.2 simonb #endif
2175 1.2 simonb if (!wl)
2176 1.2 simonb return;
2177 1.100 joerg wapbl_print(wl, 1, printf);
2178 1.2 simonb }
2179 1.2 simonb #endif
2180 1.2 simonb
2181 1.2 simonb /****************************************************************/
2182 1.2 simonb
2183 1.85 jdolecek int
2184 1.86 jdolecek wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len, bool force,
2185 1.86 jdolecek void **cookiep)
2186 1.2 simonb {
2187 1.81 jdolecek struct wapbl_dealloc *wd;
2188 1.85 jdolecek int error = 0;
2189 1.2 simonb
2190 1.2 simonb wapbl_jlock_assert(wl);
2191 1.2 simonb
2192 1.38 hannken mutex_enter(&wl->wl_mtx);
2193 1.85 jdolecek
2194 1.85 jdolecek if (__predict_false(wl->wl_dealloccnt >= wl->wl_dealloclim)) {
2195 1.85 jdolecek if (!force) {
2196 1.116 riastrad error = SET_ERROR(EAGAIN);
2197 1.85 jdolecek goto out;
2198 1.85 jdolecek }
2199 1.85 jdolecek
2200 1.85 jdolecek /*
2201 1.85 jdolecek * Forced registration can only be used when:
2202 1.85 jdolecek * 1) the caller can't cope with failure
2203 1.85 jdolecek 		 * 2) the path can only be triggered a bounded, small
2204 1.85 jdolecek 		 *    number of times per transaction
2205 1.85 jdolecek 		 * If these conditions are not fulfilled and the path is
2206 1.85 jdolecek 		 * triggered many times, this could overflow the maximum
2207 1.85 jdolecek 		 * transaction size and panic later.
2208 1.85 jdolecek */
2209 1.114 riastrad printf("%s: forced dealloc registration over limit:"
2210 1.114 riastrad " %d >= %d\n",
2211 1.114 riastrad wl->wl_mount->mnt_stat.f_mntonname,
2212 1.114 riastrad wl->wl_dealloccnt, wl->wl_dealloclim);
2213 1.85 jdolecek }
2214 1.27 pooka
2215 1.84 jdolecek wl->wl_dealloccnt++;
2216 1.84 jdolecek mutex_exit(&wl->wl_mtx);
2217 1.84 jdolecek
2218 1.81 jdolecek wd = pool_get(&wapbl_dealloc_pool, PR_WAITOK);
2219 1.81 jdolecek wd->wd_blkno = blk;
2220 1.81 jdolecek wd->wd_len = len;
2221 1.81 jdolecek
2222 1.84 jdolecek mutex_enter(&wl->wl_mtx);
2223 1.86 jdolecek TAILQ_INSERT_TAIL(&wl->wl_dealloclist, wd, wd_entries);
2224 1.86 jdolecek
2225 1.86 jdolecek if (cookiep)
2226 1.86 jdolecek *cookiep = wd;
2227 1.85 jdolecek
2228 1.114 riastrad out:
2229 1.84 jdolecek mutex_exit(&wl->wl_mtx);
2230 1.81 jdolecek
2231 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
2232 1.85 jdolecek ("wapbl_register_deallocation: blk=%"PRId64" len=%d error=%d\n",
2233 1.114 riastrad blk, len, error));
2234 1.85 jdolecek
2235 1.85 jdolecek return error;
2236 1.2 simonb }
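
/*
 * Illustrative sketch, compiled out: a hypothetical caller that prefers
 * failing softly over forced registration.  On EAGAIN it would typically
 * end the current transaction (letting the log flush) and retry, rather
 * than pass force = true.
 */
#if 0
static int
example_register_dealloc(struct wapbl *wl, daddr_t blk, int len)
{
	void *cookie;
	int error;

	error = wapbl_register_deallocation(wl, blk, len, false, &cookie);
	if (error == EAGAIN) {
		/* Too many pending deallocations in this transaction; */
		/* defer the free or retry in a later transaction. */
	}
	return error;
}
#endif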
2237 1.2 simonb
2238 1.86 jdolecek static void
2239 1.86 jdolecek wapbl_deallocation_free(struct wapbl *wl, struct wapbl_dealloc *wd,
2240 1.86 jdolecek bool locked)
2241 1.86 jdolecek {
2242 1.114 riastrad
2243 1.86 jdolecek KASSERT(!locked
2244 1.86 jdolecek || rw_lock_held(&wl->wl_rwlock) || mutex_owned(&wl->wl_mtx));
2245 1.86 jdolecek
2246 1.86 jdolecek if (!locked)
2247 1.86 jdolecek mutex_enter(&wl->wl_mtx);
2248 1.86 jdolecek
2249 1.86 jdolecek TAILQ_REMOVE(&wl->wl_dealloclist, wd, wd_entries);
2250 1.86 jdolecek wl->wl_dealloccnt--;
2251 1.86 jdolecek
2252 1.86 jdolecek if (!locked)
2253 1.86 jdolecek mutex_exit(&wl->wl_mtx);
2254 1.86 jdolecek
2255 1.86 jdolecek pool_put(&wapbl_dealloc_pool, wd);
2256 1.86 jdolecek }
2257 1.86 jdolecek
2258 1.86 jdolecek void
2259 1.86 jdolecek wapbl_unregister_deallocation(struct wapbl *wl, void *cookie)
2260 1.86 jdolecek {
2261 1.114 riastrad
2262 1.86 jdolecek KASSERT(cookie != NULL);
2263 1.86 jdolecek wapbl_deallocation_free(wl, cookie, false);
2264 1.86 jdolecek }
2265 1.86 jdolecek
2266 1.2 simonb /****************************************************************/
2267 1.2 simonb
2268 1.2 simonb static void
2269 1.2 simonb wapbl_inodetrk_init(struct wapbl *wl, u_int size)
2270 1.2 simonb {
2271 1.2 simonb
2272 1.2 simonb wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
2273 1.2 simonb if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
2274 1.2 simonb pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
2275 1.2 simonb "wapblinopl", &pool_allocator_nointr, IPL_NONE);
2276 1.2 simonb }
2277 1.2 simonb }
2278 1.2 simonb
2279 1.2 simonb static void
2280 1.2 simonb wapbl_inodetrk_free(struct wapbl *wl)
2281 1.2 simonb {
2282 1.2 simonb
2283 1.2 simonb /* XXX this KASSERT needs locking/mutex analysis */
2284 1.2 simonb KASSERT(wl->wl_inohashcnt == 0);
2285 1.2 simonb hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
2286 1.112 riastrad membar_release();
2287 1.2 simonb if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
2288 1.112 riastrad membar_acquire();
2289 1.2 simonb pool_destroy(&wapbl_ino_pool);
2290 1.2 simonb }
2291 1.2 simonb }
2292 1.2 simonb
2293 1.2 simonb static struct wapbl_ino *
2294 1.2 simonb wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
2295 1.2 simonb {
2296 1.2 simonb struct wapbl_ino_head *wih;
2297 1.2 simonb struct wapbl_ino *wi;
2298 1.2 simonb
2299 1.2 simonb KASSERT(mutex_owned(&wl->wl_mtx));
2300 1.2 simonb
2301 1.2 simonb wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2302 1.2 simonb LIST_FOREACH(wi, wih, wi_hash) {
2303 1.2 simonb if (ino == wi->wi_ino)
2304 1.2 simonb return wi;
2305 1.2 simonb }
2306 1.2 simonb return 0;
2307 1.2 simonb }
2308 1.2 simonb
2309 1.2 simonb void
2310 1.2 simonb wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2311 1.2 simonb {
2312 1.2 simonb struct wapbl_ino_head *wih;
2313 1.2 simonb struct wapbl_ino *wi;
2314 1.2 simonb
2315 1.2 simonb wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
2316 1.2 simonb
2317 1.2 simonb mutex_enter(&wl->wl_mtx);
2318 1.2 simonb if (wapbl_inodetrk_get(wl, ino) == NULL) {
2319 1.2 simonb wi->wi_ino = ino;
2320 1.2 simonb wi->wi_mode = mode;
2321 1.2 simonb wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2322 1.2 simonb LIST_INSERT_HEAD(wih, wi, wi_hash);
2323 1.2 simonb wl->wl_inohashcnt++;
2324 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_INODE,
2325 1.2 simonb ("wapbl_register_inode: ino=%"PRId64"\n", ino));
2326 1.2 simonb mutex_exit(&wl->wl_mtx);
2327 1.2 simonb } else {
2328 1.2 simonb mutex_exit(&wl->wl_mtx);
2329 1.2 simonb pool_put(&wapbl_ino_pool, wi);
2330 1.2 simonb }
2331 1.2 simonb }
2332 1.2 simonb
2333 1.2 simonb void
2334 1.2 simonb wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2335 1.2 simonb {
2336 1.2 simonb struct wapbl_ino *wi;
2337 1.2 simonb
2338 1.2 simonb mutex_enter(&wl->wl_mtx);
2339 1.2 simonb wi = wapbl_inodetrk_get(wl, ino);
2340 1.2 simonb if (wi) {
2341 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_INODE,
2342 1.2 simonb ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
2343 1.2 simonb KASSERT(wl->wl_inohashcnt > 0);
2344 1.2 simonb wl->wl_inohashcnt--;
2345 1.2 simonb LIST_REMOVE(wi, wi_hash);
2346 1.2 simonb mutex_exit(&wl->wl_mtx);
2347 1.2 simonb
2348 1.2 simonb pool_put(&wapbl_ino_pool, wi);
2349 1.2 simonb } else {
2350 1.2 simonb mutex_exit(&wl->wl_mtx);
2351 1.2 simonb }
2352 1.2 simonb }
2353 1.2 simonb
2354 1.2 simonb /****************************************************************/
2355 1.2 simonb
2356 1.71 riastrad /*
2357 1.71 riastrad * wapbl_transaction_inodes_len(wl)
2358 1.71 riastrad *
2359 1.71 riastrad * Calculate the number of bytes required for inode registration
2360 1.71 riastrad * log records in wl.
2361 1.71 riastrad */
2362 1.30 uebayasi static inline size_t
2363 1.2 simonb wapbl_transaction_inodes_len(struct wapbl *wl)
2364 1.2 simonb {
2365 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2366 1.2 simonb int iph;
2367 1.2 simonb
2368 1.2 simonb 	/* Calculate number of inodes described in an inodelist header */
2369 1.2 simonb iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2370 1.2 simonb sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2371 1.2 simonb
2372 1.2 simonb KASSERT(iph > 0);
2373 1.2 simonb
2374 1.39 christos return MAX(1, howmany(wl->wl_inohashcnt, iph)) * blocklen;
2375 1.2 simonb }
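
/*
 * Illustrative sketch, compiled out: the same computation with
 * hypothetical sizes.  Assuming 512-byte log blocks, a 16-byte inodelist
 * header and 8-byte entries, one block describes 62 inodes, so 100
 * registered inodes need two blocks and even zero inodes still cost one
 * block for the "clear" record.
 */
#if 0
static size_t
example_inodes_len(int inohashcnt)
{
	const int blocklen = 512;		/* hypothetical 1 << wl_log_dev_bshift */
	const int iph = (blocklen - 16) / 8;	/* hypothetical header/entry sizes */

	return MAX(1, howmany(inohashcnt, iph)) * blocklen;
}
#endif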
2376 1.2 simonb
2377 1.2 simonb
2378 1.71 riastrad /*
2379 1.71 riastrad * wapbl_transaction_len(wl)
2380 1.71 riastrad *
2381 1.71 riastrad * Calculate number of bytes required for all log records in wl.
2382 1.71 riastrad */
2383 1.2 simonb static size_t
2384 1.2 simonb wapbl_transaction_len(struct wapbl *wl)
2385 1.2 simonb {
2386 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2387 1.2 simonb size_t len;
2388 1.2 simonb
2389 1.80 jdolecek /* Calculate number of blocks described in a blocklist header */
2390 1.2 simonb len = wl->wl_bcount;
2391 1.79 jdolecek len += howmany(wl->wl_bufcount, wl->wl_brperjblock) * blocklen;
2392 1.79 jdolecek len += howmany(wl->wl_dealloccnt, wl->wl_brperjblock) * blocklen;
2393 1.2 simonb len += wapbl_transaction_inodes_len(wl);
2394 1.2 simonb
2395 1.2 simonb return len;
2396 1.2 simonb }
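
/*
 * Illustrative sketch, compiled out: a worked instance of the accounting
 * above with hypothetical numbers -- 512-byte log blocks, 62 block
 * records per journal block, 100 pending bufs totalling 800 KiB and 10
 * deallocations.  The result is what wapbl_flush() compares against
 * wl_circ_size - wl_reserved_bytes.
 */
#if 0
static size_t
example_transaction_len(void)
{
	const int blocklen = 512, brperjblock = 62;	/* hypothetical */
	const size_t bufcount = 100, bcount = 100 * 8192, dealloccnt = 10;
	size_t len = bcount;				/* the data itself */

	len += howmany(bufcount, brperjblock) * blocklen;   /* blocklists */
	len += howmany(dealloccnt, brperjblock) * blocklen; /* revocations */
	len += 1 * blocklen;				/* >= one inodelist block */
	return len;
}
#endif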
2397 1.2 simonb
2398 1.2 simonb /*
2399 1.71 riastrad * wapbl_cache_sync(wl, msg)
2400 1.71 riastrad *
2401 1.71 riastrad * Issue DIOCCACHESYNC to wl->wl_devvp.
2402 1.71 riastrad *
2403 1.71 riastrad * If sysctl(vfs.wapbl.verbose_commit) >= 2, print a message
2404 1.71 riastrad * including msg about the duration of the cache sync.
2405 1.48 yamt */
2406 1.48 yamt static int
2407 1.48 yamt wapbl_cache_sync(struct wapbl *wl, const char *msg)
2408 1.48 yamt {
2409 1.48 yamt const bool verbose = wapbl_verbose_commit >= 2;
2410 1.48 yamt struct bintime start_time;
2411 1.48 yamt int force = 1;
2412 1.48 yamt int error;
2413 1.48 yamt
2414 1.101 jdolecek /* Skip full cache sync if disabled */
2415 1.101 jdolecek if (!wapbl_flush_disk_cache) {
2416 1.48 yamt return 0;
2417 1.48 yamt }
2418 1.48 yamt if (verbose) {
2419 1.48 yamt bintime(&start_time);
2420 1.48 yamt }
2421 1.48 yamt error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force,
2422 1.48 yamt FWRITE, FSCRED);
2423 1.48 yamt if (error) {
2424 1.48 yamt WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2425 1.76 riastrad ("wapbl_cache_sync: DIOCCACHESYNC on dev 0x%jx "
2426 1.114 riastrad "returned %d\n", (uintmax_t)wl->wl_devvp->v_rdev,
2427 1.114 riastrad error));
2428 1.48 yamt }
2429 1.48 yamt if (verbose) {
2430 1.48 yamt struct bintime d;
2431 1.48 yamt struct timespec ts;
2432 1.48 yamt
2433 1.48 yamt bintime(&d);
2434 1.48 yamt bintime_sub(&d, &start_time);
2435 1.48 yamt bintime2timespec(&d, &ts);
2436 1.48 yamt printf("wapbl_cache_sync: %s: dev 0x%jx %ju.%09lu\n",
2437 1.48 yamt msg, (uintmax_t)wl->wl_devvp->v_rdev,
2438 1.48 yamt (uintmax_t)ts.tv_sec, ts.tv_nsec);
2439 1.48 yamt }
2440 1.87 jdolecek
2441 1.87 jdolecek wl->wl_ev_cacheflush.ev_count++;
2442 1.87 jdolecek
2443 1.48 yamt return error;
2444 1.48 yamt }
2445 1.48 yamt
2446 1.48 yamt /*
2447 1.71 riastrad * wapbl_write_commit(wl, head, tail)
2448 1.71 riastrad *
2449 1.71 riastrad * Issue a disk cache sync to wait for all pending writes to the
2450 1.71 riastrad * log to complete, and then synchronously commit the current
2451 1.71 riastrad * circular queue head and tail to the log, in the next of two
2452 1.71 riastrad * locations for commit headers on disk.
2453 1.2 simonb *
2454 1.71 riastrad * Increment the generation number. If the generation number
2455 1.71 riastrad * rolls over to zero, then a subsequent commit would appear to
2456 1.71 riastrad * have an older generation than this one -- in that case, issue a
2457 1.71 riastrad * duplicate commit to avoid this.
2458 1.71 riastrad *
2459 1.71 riastrad * => Caller must have exclusive access to wl, either by holding
2460 1.71 riastrad * wl->wl_rwlock for writer or by being wapbl_start before anyone
2461 1.71 riastrad * else has seen wl.
2462 1.2 simonb */
2463 1.2 simonb static int
2464 1.2 simonb wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
2465 1.2 simonb {
2466 1.2 simonb struct wapbl_wc_header *wc = wl->wl_wc_header;
2467 1.2 simonb struct timespec ts;
2468 1.2 simonb int error;
2469 1.34 mlelstv daddr_t pbn;
2470 1.2 simonb
2471 1.95 jdolecek error = wapbl_buffered_flush(wl, true);
2472 1.54 hannken if (error)
2473 1.54 hannken return error;
2474 1.49 yamt /*
2475 1.101 jdolecek * Flush disk cache to ensure that blocks we've written are actually
2476 1.49 yamt 	 * written to stable storage before the commit header.
2477 1.101 jdolecek 	 * This flushes to disk not only the journal blocks, but also all
2478 1.101 jdolecek 	 * metadata blocks written asynchronously since the previous commit.
2479 1.49 yamt *
2480 1.49 yamt * XXX Calc checksum here, instead we do this for now
2481 1.49 yamt */
2482 1.48 yamt wapbl_cache_sync(wl, "1");
2483 1.2 simonb
2484 1.2 simonb wc->wc_head = head;
2485 1.2 simonb wc->wc_tail = tail;
2486 1.2 simonb wc->wc_checksum = 0;
2487 1.2 simonb wc->wc_version = 1;
2488 1.2 simonb getnanotime(&ts);
2489 1.17 yamt wc->wc_time = ts.tv_sec;
2490 1.2 simonb wc->wc_timensec = ts.tv_nsec;
2491 1.2 simonb
2492 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2493 1.2 simonb 	    ("wapbl_write_commit: head = %"PRIdMAX" tail = %"PRIdMAX"\n",
2494 1.114 riastrad (intmax_t)head, (intmax_t)tail));
2495 1.2 simonb
2496 1.2 simonb /*
2497 1.49 yamt * write the commit header.
2498 1.49 yamt *
2499 1.2 simonb * XXX if generation will rollover, then first zero
2500 1.2 simonb * over second commit header before trying to write both headers.
2501 1.2 simonb */
2502 1.2 simonb
2503 1.34 mlelstv pbn = wl->wl_logpbn + (wc->wc_generation % 2);
2504 1.34 mlelstv #ifdef _KERNEL
2505 1.34 mlelstv pbn = btodb(pbn << wc->wc_log_dev_bshift);
2506 1.34 mlelstv #endif
2507 1.114 riastrad error = wapbl_buffered_write(wc, wc->wc_len, wl, pbn,
2508 1.114 riastrad WAPBL_JFLAGS(wl));
2509 1.54 hannken if (error)
2510 1.54 hannken return error;
2511 1.95 jdolecek error = wapbl_buffered_flush(wl, true);
2512 1.2 simonb if (error)
2513 1.2 simonb return error;
2514 1.2 simonb
2515 1.49 yamt /*
2516 1.101 jdolecek * Flush disk cache to ensure that the commit header is actually
2517 1.101 jdolecek 	 * written before the metadata blocks.  The commit block is written
2518 1.101 jdolecek 	 * using FUA when enabled; in that case this flush is not needed.
2519 1.49 yamt */
2520 1.101 jdolecek if (!WAPBL_USE_FUA(wl))
2521 1.101 jdolecek wapbl_cache_sync(wl, "2");
2522 1.2 simonb
2523 1.2 simonb /*
2524 1.2 simonb * If the generation number was zero, write it out a second time.
2525 1.2 simonb * This handles initialization and generation number rollover
2526 1.2 simonb */
2527 1.2 simonb if (wc->wc_generation++ == 0) {
2528 1.2 simonb error = wapbl_write_commit(wl, head, tail);
2529 1.2 simonb /*
2530 1.2 simonb * This panic should be able to be removed if we do the
2531 1.2 simonb * zero'ing mentioned above, and we are certain to roll
2532 1.2 simonb * back generation number on failure.
2533 1.2 simonb */
2534 1.114 riastrad if (error) {
2535 1.2 simonb panic("wapbl_write_commit: error writing duplicate "
2536 1.114 riastrad "log header: %d", error);
2537 1.114 riastrad }
2538 1.2 simonb }
2539 1.87 jdolecek
2540 1.87 jdolecek wl->wl_ev_commit.ev_count++;
2541 1.87 jdolecek
2542 1.2 simonb return 0;
2543 1.2 simonb }
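
/*
 * Illustrative sketch, compiled out: the commit header alternates between
 * two slots with the generation number, so the slot not being written
 * always still holds the previous consistent commit and replay can pick
 * whichever header carries the newer generation.
 */
#if 0
static daddr_t
example_commit_slot(struct wapbl *wl, uint32_t generation)
{
	daddr_t pbn = wl->wl_logpbn + (generation % 2);

	return btodb(pbn << wl->wl_wc_header->wc_log_dev_bshift);
}
#endif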
2544 1.2 simonb
2545 1.71 riastrad /*
2546 1.71 riastrad * wapbl_write_blocks(wl, offp)
2547 1.71 riastrad *
2548 1.71 riastrad * Write all pending physical blocks in the current transaction
2549 1.71 riastrad * from wapbl_add_buf to the log on disk, adding to the circular
2550 1.71 riastrad * queue head at byte offset *offp, and returning the new head's
2551 1.71 riastrad * byte offset in *offp.
2552 1.71 riastrad */
2553 1.2 simonb static int
2554 1.2 simonb wapbl_write_blocks(struct wapbl *wl, off_t *offp)
2555 1.2 simonb {
2556 1.2 simonb struct wapbl_wc_blocklist *wc =
2557 1.2 simonb (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2558 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2559 1.2 simonb struct buf *bp;
2560 1.2 simonb off_t off = *offp;
2561 1.2 simonb int error;
2562 1.7 joerg size_t padding;
2563 1.2 simonb
2564 1.2 simonb KASSERT(rw_write_held(&wl->wl_rwlock));
2565 1.2 simonb
2566 1.94 jdolecek bp = TAILQ_FIRST(&wl->wl_bufs);
2567 1.2 simonb
2568 1.2 simonb while (bp) {
2569 1.2 simonb int cnt;
2570 1.2 simonb struct buf *obp = bp;
2571 1.2 simonb
2572 1.2 simonb KASSERT(bp->b_flags & B_LOCKED);
2573 1.2 simonb
2574 1.2 simonb wc->wc_type = WAPBL_WC_BLOCKS;
2575 1.2 simonb wc->wc_len = blocklen;
2576 1.2 simonb wc->wc_blkcount = 0;
2577 1.109 chs wc->wc_unused = 0;
2578 1.114 riastrad while (bp && wc->wc_blkcount < wl->wl_brperjblock) {
2579 1.2 simonb /*
2580 1.2 simonb * Make sure all the physical block numbers are up to
2581 1.2 simonb * date. If this is not always true on a given
2582 1.2 simonb * filesystem, then VOP_BMAP must be called. We
2583 1.2 simonb * could call VOP_BMAP here, or else in the filesystem
2584 1.2 simonb * specific flush callback, although neither of those
2585 1.2 simonb * solutions allow us to take the vnode lock. If a
2586 1.2 simonb * filesystem requires that we must take the vnode lock
2587 1.2 simonb * to call VOP_BMAP, then we can probably do it in
2588 1.2 simonb * bwrite when the vnode lock should already be held
2589 1.2 simonb * by the invoking code.
2590 1.2 simonb */
2591 1.114 riastrad KASSERT(bp->b_vp->v_type == VBLK ||
2592 1.114 riastrad bp->b_blkno != bp->b_lblkno);
2593 1.2 simonb KASSERT(bp->b_blkno > 0);
2594 1.2 simonb
2595 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
2596 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
2597 1.2 simonb wc->wc_len += bp->b_bcount;
2598 1.2 simonb wc->wc_blkcount++;
2599 1.94 jdolecek bp = TAILQ_NEXT(bp, b_wapbllist);
2600 1.2 simonb }
2601 1.7 joerg if (wc->wc_len % blocklen != 0) {
2602 1.7 joerg padding = blocklen - wc->wc_len % blocklen;
2603 1.7 joerg wc->wc_len += padding;
2604 1.7 joerg } else {
2605 1.7 joerg padding = 0;
2606 1.7 joerg }
2607 1.7 joerg
2608 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2609 1.114 riastrad ("wapbl_write_blocks:"
2610 1.114 riastrad " len = %u (padding %zu) off = %"PRIdMAX"\n",
2611 1.114 riastrad wc->wc_len, padding, (intmax_t)off));
2612 1.2 simonb
2613 1.2 simonb error = wapbl_circ_write(wl, wc, blocklen, &off);
2614 1.2 simonb if (error)
2615 1.2 simonb return error;
2616 1.2 simonb bp = obp;
2617 1.2 simonb cnt = 0;
2618 1.114 riastrad while (bp && cnt++ < wl->wl_brperjblock) {
2619 1.2 simonb error = wapbl_circ_write(wl, bp->b_data,
2620 1.2 simonb bp->b_bcount, &off);
2621 1.2 simonb if (error)
2622 1.2 simonb return error;
2623 1.94 jdolecek bp = TAILQ_NEXT(bp, b_wapbllist);
2624 1.2 simonb }
2625 1.7 joerg if (padding) {
2626 1.7 joerg void *zero;
2627 1.91 riastrad
2628 1.51 para zero = wapbl_alloc(padding);
2629 1.7 joerg memset(zero, 0, padding);
2630 1.7 joerg error = wapbl_circ_write(wl, zero, padding, &off);
2631 1.18 yamt wapbl_free(zero, padding);
2632 1.7 joerg if (error)
2633 1.7 joerg return error;
2634 1.7 joerg }
2635 1.2 simonb }
2636 1.2 simonb *offp = off;
2637 1.2 simonb return 0;
2638 1.2 simonb }
2639 1.2 simonb
2640 1.71 riastrad /*
2641 1.71 riastrad * wapbl_write_revocations(wl, offp)
2642 1.71 riastrad *
2643 1.71 riastrad * Write all pending deallocations in the current transaction from
2644 1.71 riastrad * wapbl_register_deallocation to the log on disk, adding to the
2645 1.71 riastrad * circular queue's head at byte offset *offp, and returning the
2646 1.71 riastrad * new head's byte offset in *offp.
2647 1.71 riastrad */
2648 1.2 simonb static int
2649 1.2 simonb wapbl_write_revocations(struct wapbl *wl, off_t *offp)
2650 1.2 simonb {
2651 1.2 simonb struct wapbl_wc_blocklist *wc =
2652 1.2 simonb (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2653 1.81 jdolecek struct wapbl_dealloc *wd, *lwd;
2654 1.2 simonb int blocklen = 1<<wl->wl_log_dev_bshift;
2655 1.2 simonb off_t off = *offp;
2656 1.2 simonb int error;
2657 1.2 simonb
2658 1.89 riastrad KASSERT(rw_write_held(&wl->wl_rwlock));
2659 1.89 riastrad
2660 1.2 simonb if (wl->wl_dealloccnt == 0)
2661 1.2 simonb return 0;
2662 1.2 simonb
2663 1.86 jdolecek while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2664 1.2 simonb wc->wc_type = WAPBL_WC_REVOCATIONS;
2665 1.2 simonb wc->wc_len = blocklen;
2666 1.2 simonb wc->wc_blkcount = 0;
2667 1.109 chs wc->wc_unused = 0;
2668 1.114 riastrad while (wd && wc->wc_blkcount < wl->wl_brperjblock) {
2669 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2670 1.81 jdolecek wd->wd_blkno;
2671 1.2 simonb wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2672 1.81 jdolecek wd->wd_len;
2673 1.2 simonb wc->wc_blkcount++;
2674 1.81 jdolecek
2675 1.86 jdolecek wd = TAILQ_NEXT(wd, wd_entries);
2676 1.2 simonb }
2677 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2678 1.2 simonb ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2679 1.114 riastrad wc->wc_len, (intmax_t)off));
2680 1.2 simonb error = wapbl_circ_write(wl, wc, blocklen, &off);
2681 1.2 simonb if (error)
2682 1.2 simonb return error;
2683 1.81 jdolecek
2684 1.81 jdolecek /* free all successfully written deallocs */
2685 1.81 jdolecek lwd = wd;
2686 1.86 jdolecek while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2687 1.83 jdolecek if (wd == lwd)
2688 1.83 jdolecek break;
2689 1.86 jdolecek wapbl_deallocation_free(wl, wd, true);
2690 1.81 jdolecek }
2691 1.2 simonb }
2692 1.2 simonb *offp = off;
2693 1.2 simonb return 0;
2694 1.2 simonb }
2695 1.2 simonb
2696 1.71 riastrad /*
2697 1.71 riastrad * wapbl_write_inodes(wl, offp)
2698 1.71 riastrad *
2699 1.71 riastrad * Write all pending inode allocations in the current transaction
2700 1.71 riastrad * from wapbl_register_inode to the log on disk, adding to the
2701 1.71 riastrad * circular queue's head at byte offset *offp and returning the
2702 1.71 riastrad * new head's byte offset in *offp.
2703 1.71 riastrad */
2704 1.2 simonb static int
2705 1.2 simonb wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2706 1.2 simonb {
2707 1.2 simonb struct wapbl_wc_inodelist *wc =
2708 1.2 simonb (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2709 1.2 simonb int i;
2710 1.14 joerg int blocklen = 1 << wl->wl_log_dev_bshift;
2711 1.2 simonb off_t off = *offp;
2712 1.2 simonb int error;
2713 1.2 simonb
2714 1.2 simonb struct wapbl_ino_head *wih;
2715 1.2 simonb struct wapbl_ino *wi;
2716 1.2 simonb int iph;
2717 1.2 simonb
2718 1.2 simonb iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2719 1.2 simonb sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2720 1.2 simonb
2721 1.2 simonb i = 0;
2722 1.2 simonb wih = &wl->wl_inohash[0];
2723 1.2 simonb wi = 0;
2724 1.2 simonb do {
2725 1.2 simonb wc->wc_type = WAPBL_WC_INODES;
2726 1.2 simonb wc->wc_len = blocklen;
2727 1.2 simonb wc->wc_inocnt = 0;
2728 1.2 simonb wc->wc_clear = (i == 0);
2729 1.114 riastrad while (i < wl->wl_inohashcnt && wc->wc_inocnt < iph) {
2730 1.2 simonb while (!wi) {
2731 1.2 simonb KASSERT((wih - &wl->wl_inohash[0])
2732 1.2 simonb <= wl->wl_inohashmask);
2733 1.2 simonb wi = LIST_FIRST(wih++);
2734 1.2 simonb }
2735 1.2 simonb wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2736 1.2 simonb wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2737 1.2 simonb wc->wc_inocnt++;
2738 1.2 simonb i++;
2739 1.2 simonb wi = LIST_NEXT(wi, wi_hash);
2740 1.2 simonb }
2741 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2742 1.2 simonb ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2743 1.114 riastrad wc->wc_len, (intmax_t)off));
2744 1.2 simonb error = wapbl_circ_write(wl, wc, blocklen, &off);
2745 1.2 simonb if (error)
2746 1.2 simonb return error;
2747 1.2 simonb } while (i < wl->wl_inohashcnt);
2748 1.91 riastrad
2749 1.2 simonb *offp = off;
2750 1.2 simonb return 0;
2751 1.2 simonb }
2752 1.2 simonb
2753 1.2 simonb #endif /* _KERNEL */
2754 1.2 simonb
2755 1.2 simonb /****************************************************************/
2756 1.2 simonb
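/*
 * wapbl_blk
 *
 * Replay-time record associating a file system block number with the
 * offset in the log of the most recent copy of its data.
 */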
2757 1.2 simonb struct wapbl_blk {
2758 1.2 simonb LIST_ENTRY(wapbl_blk) wb_hash;
2759 1.2 simonb daddr_t wb_blk;
2760 1.2 simonb off_t wb_off; /* Offset of this block in the log */
2761 1.2 simonb };
2762 1.2 simonb #define WAPBL_BLKPOOL_MIN 83
2763 1.2 simonb
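/*
 * wapbl_blkhash_init(wr, size)
 *
 * Allocate and initialize the replay block hash table, sized for
 * about size entries but never fewer than WAPBL_BLKPOOL_MIN.
 */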
2764 1.2 simonb static void
2765 1.2 simonb wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2766 1.2 simonb {
2767 1.114 riastrad
2768 1.2 simonb if (size < WAPBL_BLKPOOL_MIN)
2769 1.2 simonb size = WAPBL_BLKPOOL_MIN;
2770 1.2 simonb KASSERT(wr->wr_blkhash == 0);
2771 1.2 simonb #ifdef _KERNEL
2772 1.2 simonb wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2773 1.2 simonb #else /* ! _KERNEL */
2774 1.2 simonb /* Manually implement hashinit */
2775 1.2 simonb {
2776 1.25 lukem unsigned long i, hashsize;
2777 1.114 riastrad
2778 1.2 simonb for (hashsize = 1; hashsize < size; hashsize <<= 1)
2779 1.2 simonb continue;
2780 1.114 riastrad wr->wr_blkhash = wapbl_alloc(hashsize *
2781 1.114 riastrad sizeof(*wr->wr_blkhash));
2782 1.37 drochner for (i = 0; i < hashsize; i++)
2783 1.2 simonb LIST_INIT(&wr->wr_blkhash[i]);
2784 1.2 simonb wr->wr_blkhashmask = hashsize - 1;
2785 1.2 simonb }
2786 1.2 simonb #endif /* ! _KERNEL */
2787 1.2 simonb }
2788 1.2 simonb
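/*
 * wapbl_blkhash_free(wr)
 *
 * Free the replay block hash table, which must already be empty.
 */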
2789 1.2 simonb static void
2790 1.2 simonb wapbl_blkhash_free(struct wapbl_replay *wr)
2791 1.2 simonb {
2792 1.114 riastrad
2793 1.2 simonb KASSERT(wr->wr_blkhashcnt == 0);
2794 1.2 simonb #ifdef _KERNEL
2795 1.2 simonb hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2796 1.2 simonb #else /* ! _KERNEL */
2797 1.18 yamt wapbl_free(wr->wr_blkhash,
2798 1.18 yamt (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
2799 1.2 simonb #endif /* ! _KERNEL */
2800 1.2 simonb }
2801 1.2 simonb
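/*
 * wapbl_blkhash_get(wr, blk)
 *
 * Return the hash entry for block blk, or NULL if there is none.
 */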
2802 1.2 simonb static struct wapbl_blk *
2803 1.2 simonb wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2804 1.2 simonb {
2805 1.2 simonb struct wapbl_blk_head *wbh;
2806 1.2 simonb struct wapbl_blk *wb;
2807 1.114 riastrad
2808 1.2 simonb wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2809 1.2 simonb LIST_FOREACH(wb, wbh, wb_hash) {
2810 1.2 simonb if (blk == wb->wb_blk)
2811 1.2 simonb return wb;
2812 1.2 simonb }
2813 1.2 simonb return NULL;
2814 1.2 simonb }
2815 1.2 simonb
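/*
 * wapbl_blkhash_ins(wr, blk, off)
 *
 * Record that the most recent copy of block blk lives at byte offset
 * off in the log, either updating the existing hash entry or
 * inserting a new one.
 */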
2816 1.2 simonb static void
2817 1.2 simonb wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2818 1.2 simonb {
2819 1.2 simonb struct wapbl_blk_head *wbh;
2820 1.2 simonb struct wapbl_blk *wb;
2821 1.114 riastrad
2822 1.2 simonb wb = wapbl_blkhash_get(wr, blk);
2823 1.2 simonb if (wb) {
2824 1.2 simonb KASSERT(wb->wb_blk == blk);
2825 1.2 simonb wb->wb_off = off;
2826 1.2 simonb } else {
2827 1.51 para wb = wapbl_alloc(sizeof(*wb));
2828 1.2 simonb wb->wb_blk = blk;
2829 1.2 simonb wb->wb_off = off;
2830 1.2 simonb wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2831 1.2 simonb LIST_INSERT_HEAD(wbh, wb, wb_hash);
2832 1.2 simonb wr->wr_blkhashcnt++;
2833 1.2 simonb }
2834 1.2 simonb }
2835 1.2 simonb
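/*
 * wapbl_blkhash_rem(wr, blk)
 *
 * Remove the hash entry for block blk, if any.
 */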
2836 1.2 simonb static void
2837 1.2 simonb wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2838 1.2 simonb {
2839 1.2 simonb struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2840 1.114 riastrad
2841 1.2 simonb if (wb) {
2842 1.2 simonb KASSERT(wr->wr_blkhashcnt > 0);
2843 1.2 simonb wr->wr_blkhashcnt--;
2844 1.2 simonb LIST_REMOVE(wb, wb_hash);
2845 1.18 yamt wapbl_free(wb, sizeof(*wb));
2846 1.2 simonb }
2847 1.2 simonb }
2848 1.2 simonb
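/*
 * wapbl_blkhash_clear(wr)
 *
 * Remove and free every entry in the replay block hash table.
 */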
2849 1.2 simonb static void
2850 1.2 simonb wapbl_blkhash_clear(struct wapbl_replay *wr)
2851 1.2 simonb {
2852 1.25 lukem unsigned long i;
2853 1.114 riastrad
2854 1.2 simonb for (i = 0; i <= wr->wr_blkhashmask; i++) {
2855 1.2 simonb struct wapbl_blk *wb;
2856 1.2 simonb
2857 1.2 simonb while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2858 1.2 simonb KASSERT(wr->wr_blkhashcnt > 0);
2859 1.2 simonb wr->wr_blkhashcnt--;
2860 1.2 simonb LIST_REMOVE(wb, wb_hash);
2861 1.18 yamt wapbl_free(wb, sizeof(*wb));
2862 1.2 simonb }
2863 1.2 simonb }
2864 1.2 simonb KASSERT(wr->wr_blkhashcnt == 0);
2865 1.2 simonb }
2866 1.2 simonb
2867 1.2 simonb /****************************************************************/
2868 1.2 simonb
2869 1.71 riastrad /*
2870 1.71 riastrad * wapbl_circ_read(wr, data, len, offp)
2871 1.71 riastrad *
2872 1.71 riastrad * Read len bytes into data from the circular queue of wr,
2873 1.71 riastrad * starting at the linear byte offset *offp, and returning the new
2874 1.71 riastrad * linear byte offset in *offp.
2875 1.71 riastrad *
2876 1.71 riastrad * If the starting linear byte offset precedes wr->wr_circ_off,
2877 1.71 riastrad * the read instead begins at wr->wr_circ_off. XXX WTF? This
2878 1.71 riastrad * should be a KASSERT, not a conditional.
2879 1.71 riastrad */
2880 1.2 simonb static int
2881 1.2 simonb wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2882 1.2 simonb {
2883 1.2 simonb size_t slen;
2884 1.2 simonb off_t off = *offp;
2885 1.2 simonb int error;
2886 1.34 mlelstv daddr_t pbn;
2887 1.2 simonb
2888 1.114 riastrad KASSERT(((len >> wr->wr_log_dev_bshift) << wr->wr_log_dev_bshift) ==
2889 1.114 riastrad len);
2890 1.34 mlelstv
2891 1.14 joerg if (off < wr->wr_circ_off)
2892 1.14 joerg off = wr->wr_circ_off;
2893 1.14 joerg slen = wr->wr_circ_off + wr->wr_circ_size - off;
2894 1.2 simonb if (slen < len) {
2895 1.34 mlelstv pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2896 1.34 mlelstv #ifdef _KERNEL
2897 1.34 mlelstv pbn = btodb(pbn << wr->wr_log_dev_bshift);
2898 1.34 mlelstv #endif
2899 1.34 mlelstv error = wapbl_read(data, slen, wr->wr_devvp, pbn);
2900 1.2 simonb if (error)
2901 1.2 simonb return error;
2902 1.2 simonb data = (uint8_t *)data + slen;
2903 1.2 simonb len -= slen;
2904 1.14 joerg off = wr->wr_circ_off;
2905 1.2 simonb }
2906 1.34 mlelstv pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2907 1.34 mlelstv #ifdef _KERNEL
2908 1.34 mlelstv pbn = btodb(pbn << wr->wr_log_dev_bshift);
2909 1.34 mlelstv #endif
2910 1.34 mlelstv error = wapbl_read(data, len, wr->wr_devvp, pbn);
2911 1.2 simonb if (error)
2912 1.2 simonb return error;
2913 1.2 simonb off += len;
2914 1.14 joerg if (off >= wr->wr_circ_off + wr->wr_circ_size)
2915 1.14 joerg off = wr->wr_circ_off;
2916 1.2 simonb *offp = off;
2917 1.2 simonb return 0;
2918 1.2 simonb }
2919 1.2 simonb
2920 1.71 riastrad /*
2921 1.71 riastrad * wapbl_circ_advance(wr, len, offp)
2922 1.71 riastrad *
2923 1.71 riastrad * Compute the linear byte offset of the circular queue of wr that
2924 1.71 riastrad * is len bytes past *offp, and store it in *offp.
2925 1.71 riastrad *
2926 1.71 riastrad * This is as if wapbl_circ_read, but without actually reading
2927 1.71 riastrad * anything.
2928 1.71 riastrad *
2929 1.71 riastrad * If the starting linear byte offset precedes wr->wr_circ_off, it
2930 1.71 riastrad * is taken to be wr->wr_circ_off instead. XXX WTF? This should
2931 1.71 riastrad * be a KASSERT, not a conditional.
2932 1.71 riastrad */
2933 1.2 simonb static void
2934 1.2 simonb wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2935 1.2 simonb {
2936 1.2 simonb size_t slen;
2937 1.2 simonb off_t off = *offp;
2938 1.2 simonb
2939 1.114 riastrad KASSERT(((len >> wr->wr_log_dev_bshift) << wr->wr_log_dev_bshift) ==
2940 1.114 riastrad len);
2941 1.2 simonb
2942 1.14 joerg if (off < wr->wr_circ_off)
2943 1.14 joerg off = wr->wr_circ_off;
2944 1.14 joerg slen = wr->wr_circ_off + wr->wr_circ_size - off;
2945 1.2 simonb if (slen < len) {
2946 1.2 simonb len -= slen;
2947 1.14 joerg off = wr->wr_circ_off;
2948 1.2 simonb }
2949 1.2 simonb off += len;
2950 1.14 joerg if (off >= wr->wr_circ_off + wr->wr_circ_size)
2951 1.14 joerg off = wr->wr_circ_off;
2952 1.2 simonb *offp = off;
2953 1.2 simonb }
2954 1.2 simonb
2955 1.2 simonb /****************************************************************/
2956 1.2 simonb
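/*
 * wapbl_replay_start(wrp, vp, off, count, blksize)
 *
 * Read the log headers of the journal that starts at block off of vp
 * and scan the pending transactions, building the block hash table
 * and inode list needed for replay.  On success, return 0 and store
 * the new replay state in *wrp; otherwise return an error.
 */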
2957 1.2 simonb int
2958 1.2 simonb wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2959 1.114 riastrad daddr_t off, size_t count, size_t blksize)
2960 1.2 simonb {
2961 1.2 simonb struct wapbl_replay *wr;
2962 1.2 simonb int error;
2963 1.2 simonb struct vnode *devvp;
2964 1.2 simonb daddr_t logpbn;
2965 1.2 simonb uint8_t *scratch;
2966 1.2 simonb struct wapbl_wc_header *wch;
2967 1.2 simonb struct wapbl_wc_header *wch2;
2968 1.2 simonb /* Use this until we read the actual log header */
2969 1.31 mlelstv int log_dev_bshift = ilog2(blksize);
2970 1.2 simonb size_t used;
2971 1.34 mlelstv daddr_t pbn;
2972 1.2 simonb
2973 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2974 1.114 riastrad ("wapbl_replay_start: vp=%p off=%"PRId64" count=%zu blksize=%zu\n",
2975 1.114 riastrad vp, off, count, blksize));
2976 1.2 simonb
2977 1.2 simonb if (off < 0)
2978 1.116 riastrad return SET_ERROR(EINVAL);
2979 1.2 simonb
2980 1.2 simonb if (blksize < DEV_BSIZE)
2981 1.116 riastrad return SET_ERROR(EINVAL);
2982 1.2 simonb if (blksize % DEV_BSIZE)
2983 1.116 riastrad return SET_ERROR(EINVAL);
2984 1.2 simonb
2985 1.2 simonb #ifdef _KERNEL
2986 1.2 simonb #if 0
2987 1.2 simonb /* XXX vp->v_size isn't reliably set for VBLK devices,
2988 1.2 simonb * especially root. However, we might still want to verify
2989 1.2 simonb * that the full load is readable */
2990 1.2 simonb if ((off + count) * blksize > vp->v_size)
2991 1.116 riastrad return SET_ERROR(EINVAL);
2992 1.2 simonb #endif
2993 1.2 simonb if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2994 1.2 simonb return error;
2995 1.2 simonb }
2996 1.2 simonb #else /* ! _KERNEL */
2997 1.2 simonb devvp = vp;
2998 1.2 simonb logpbn = off;
2999 1.2 simonb #endif /* ! _KERNEL */
3000 1.2 simonb
3001 1.51 para scratch = wapbl_alloc(MAXBSIZE);
3002 1.2 simonb
3003 1.34 mlelstv pbn = logpbn;
3004 1.34 mlelstv #ifdef _KERNEL
3005 1.34 mlelstv pbn = btodb(pbn << log_dev_bshift);
3006 1.34 mlelstv #endif
3007 1.34 mlelstv error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, pbn);
3008 1.2 simonb if (error)
3009 1.2 simonb goto errout;
3010 1.2 simonb
3011 1.2 simonb wch = (struct wapbl_wc_header *)scratch;
3012 1.2 simonb wch2 =
3013 1.2 simonb (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
3014 1.2 simonb /* XXX verify checksums and magic numbers */
3015 1.2 simonb if (wch->wc_type != WAPBL_WC_HEADER) {
3016 1.2 simonb printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
3017 1.116 riastrad error = SET_ERROR(EFTYPE);
3018 1.2 simonb goto errout;
3019 1.2 simonb }
3020 1.2 simonb
3021 1.2 simonb if (wch2->wc_generation > wch->wc_generation)
3022 1.2 simonb wch = wch2;
3023 1.2 simonb
3024 1.2 simonb wr = wapbl_calloc(1, sizeof(*wr));
3025 1.2 simonb
3026 1.2 simonb wr->wr_logvp = vp;
3027 1.2 simonb wr->wr_devvp = devvp;
3028 1.2 simonb wr->wr_logpbn = logpbn;
3029 1.2 simonb
3030 1.2 simonb wr->wr_scratch = scratch;
3031 1.2 simonb
3032 1.14 joerg wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
3033 1.14 joerg wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
3034 1.14 joerg wr->wr_circ_off = wch->wc_circ_off;
3035 1.14 joerg wr->wr_circ_size = wch->wc_circ_size;
3036 1.14 joerg wr->wr_generation = wch->wc_generation;
3037 1.2 simonb
3038 1.2 simonb used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
3039 1.2 simonb
3040 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
3041 1.2 simonb ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
3042 1.114 riastrad " len=%"PRId64" used=%zu\n",
3043 1.114 riastrad wch->wc_head, wch->wc_tail, wch->wc_circ_off,
3044 1.114 riastrad wch->wc_circ_size, used));
3045 1.2 simonb
3046 1.2 simonb wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
3047 1.11 joerg
3048 1.14 joerg error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
3049 1.2 simonb if (error) {
3050 1.2 simonb wapbl_replay_stop(wr);
3051 1.2 simonb wapbl_replay_free(wr);
3052 1.2 simonb return error;
3053 1.2 simonb }
3054 1.2 simonb
3055 1.2 simonb *wrp = wr;
3056 1.2 simonb return 0;
3057 1.2 simonb
3058 1.114 riastrad errout:
3059 1.18 yamt wapbl_free(scratch, MAXBSIZE);
3060 1.2 simonb return error;
3061 1.2 simonb }
3062 1.2 simonb
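/*
 * wapbl_replay_stop(wr)
 *
 * Release the scratch buffer and the block hash table once replay is
 * finished.  Does nothing if the replay is not open.
 */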
3063 1.2 simonb void
3064 1.2 simonb wapbl_replay_stop(struct wapbl_replay *wr)
3065 1.2 simonb {
3066 1.2 simonb
3067 1.4 joerg if (!wapbl_replay_isopen(wr))
3068 1.4 joerg return;
3069 1.4 joerg
3070 1.2 simonb WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
3071 1.2 simonb
3072 1.18 yamt wapbl_free(wr->wr_scratch, MAXBSIZE);
3073 1.18 yamt wr->wr_scratch = NULL;
3074 1.2 simonb
3075 1.18 yamt wr->wr_logvp = NULL;
3076 1.2 simonb
3077 1.2 simonb wapbl_blkhash_clear(wr);
3078 1.2 simonb wapbl_blkhash_free(wr);
3079 1.2 simonb }
3080 1.2 simonb
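/*
 * wapbl_replay_free(wr)
 *
 * Free a replay state previously shut down with wapbl_replay_stop,
 * including any recorded inode list.
 */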
3081 1.2 simonb void
3082 1.2 simonb wapbl_replay_free(struct wapbl_replay *wr)
3083 1.2 simonb {
3084 1.2 simonb
3085 1.2 simonb KDASSERT(!wapbl_replay_isopen(wr));
3086 1.2 simonb
3087 1.114 riastrad if (wr->wr_inodes) {
3088 1.18 yamt wapbl_free(wr->wr_inodes,
3089 1.18 yamt wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
3090 1.114 riastrad }
3091 1.18 yamt wapbl_free(wr, sizeof(*wr));
3092 1.2 simonb }
3093 1.2 simonb
3094 1.4 joerg #ifdef _KERNEL
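/*
 * wapbl_replay_isopen1(wr)
 *
 * Exported wrapper around wapbl_replay_isopen().
 */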
3095 1.2 simonb int
3096 1.2 simonb wapbl_replay_isopen1(struct wapbl_replay *wr)
3097 1.2 simonb {
3098 1.2 simonb
3099 1.2 simonb return wapbl_replay_isopen(wr);
3100 1.2 simonb }
3101 1.4 joerg #endif
3102 1.2 simonb
3103 1.62 mlelstv /*
3104 1.62 mlelstv  * Calculate the disk address for the i'th block in the wc_blocklist,
3105 1.62 mlelstv  * offset by j blocks of size blen.
3106 1.62 mlelstv *
3107 1.62 mlelstv * wc_daddr is always a kernel disk address in DEV_BSIZE units that
3108 1.62 mlelstv * was written to the journal.
3109 1.62 mlelstv *
3110 1.62 mlelstv * The kernel needs that address plus the offset in DEV_BSIZE units.
3111 1.62 mlelstv *
3112 1.62 mlelstv * Userland needs that address plus the offset in blen units.
3113 1.62 mlelstv *
3114 1.62 mlelstv */
3115 1.62 mlelstv static daddr_t
3116 1.62 mlelstv wapbl_block_daddr(struct wapbl_wc_blocklist *wc, int i, int j, int blen)
3117 1.62 mlelstv {
3118 1.62 mlelstv daddr_t pbn;
3119 1.62 mlelstv
3120 1.62 mlelstv #ifdef _KERNEL
3121 1.62 mlelstv pbn = wc->wc_blocks[i].wc_daddr + btodb(j * blen);
3122 1.62 mlelstv #else
3123 1.62 mlelstv pbn = dbtob(wc->wc_blocks[i].wc_daddr) / blen + j;
3124 1.62 mlelstv #endif
3125 1.62 mlelstv
3126 1.62 mlelstv return pbn;
3127 1.62 mlelstv }
3128 1.62 mlelstv
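/*
 * wapbl_replay_process_blocks(wr, offp)
 *
 * For the WAPBL_WC_BLOCKS record in the scratch buffer, record the
 * log offset of every block it describes in the block hash table,
 * advancing *offp past the block data.
 */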
3129 1.10 joerg static void
3130 1.10 joerg wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
3131 1.10 joerg {
3132 1.10 joerg struct wapbl_wc_blocklist *wc =
3133 1.10 joerg (struct wapbl_wc_blocklist *)wr->wr_scratch;
3134 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3135 1.10 joerg int i, j, n;
3136 1.10 joerg
3137 1.10 joerg for (i = 0; i < wc->wc_blkcount; i++) {
3138 1.10 joerg /*
3139 1.10 joerg * Enter each physical block into the hashtable independently.
3140 1.10 joerg */
3141 1.14 joerg n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
3142 1.10 joerg for (j = 0; j < n; j++) {
3143 1.114 riastrad wapbl_blkhash_ins(wr,
3144 1.114 riastrad wapbl_block_daddr(wc, i, j, fsblklen),
3145 1.10 joerg *offp);
3146 1.10 joerg wapbl_circ_advance(wr, fsblklen, offp);
3147 1.10 joerg }
3148 1.10 joerg }
3149 1.10 joerg }
3150 1.10 joerg
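/*
 * wapbl_replay_process_revocations(wr)
 *
 * For the WAPBL_WC_REVOCATIONS record in the scratch buffer, remove
 * every revoked block from the block hash table.
 */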
3151 1.10 joerg static void
3152 1.10 joerg wapbl_replay_process_revocations(struct wapbl_replay *wr)
3153 1.10 joerg {
3154 1.10 joerg struct wapbl_wc_blocklist *wc =
3155 1.10 joerg (struct wapbl_wc_blocklist *)wr->wr_scratch;
3156 1.34 mlelstv int fsblklen = 1 << wr->wr_fs_dev_bshift;
3157 1.10 joerg int i, j, n;
3158 1.10 joerg
3159 1.10 joerg for (i = 0; i < wc->wc_blkcount; i++) {
3160 1.10 joerg /*
3161 1.10 joerg * Remove any blocks found from the hashtable.
3162 1.10 joerg */
3163 1.14 joerg n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
3164 1.114 riastrad for (j = 0; j < n; j++) {
3165 1.114 riastrad wapbl_blkhash_rem(wr, wapbl_block_daddr(wc, i, j,
3166 1.114 riastrad fsblklen));
3167 1.114 riastrad }
3168 1.10 joerg }
3169 1.10 joerg }
3170 1.10 joerg
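/*
 * wapbl_replay_process_inodes(wr, oldoff, newoff)
 *
 * For the WAPBL_WC_INODES record in the scratch buffer, append the
 * listed inodes to wr_inodes, first discarding the old list if the
 * record has wc_clear set, and remember where in the log the record
 * was found so that location will not be overwritten.
 */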
3171 1.10 joerg static void
3172 1.114 riastrad wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff,
3173 1.114 riastrad off_t newoff)
3174 1.10 joerg {
3175 1.10 joerg struct wapbl_wc_inodelist *wc =
3176 1.10 joerg (struct wapbl_wc_inodelist *)wr->wr_scratch;
3177 1.18 yamt void *new_inodes;
3178 1.18 yamt const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
3179 1.18 yamt
3180 1.18 yamt KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
3181 1.18 yamt
3182 1.10 joerg /*
3183 1.10 joerg * Keep track of where we found this so the location won't be
3184 1.10 joerg * overwritten.
3185 1.10 joerg */
3186 1.10 joerg if (wc->wc_clear) {
3187 1.10 joerg wr->wr_inodestail = oldoff;
3188 1.10 joerg wr->wr_inodescnt = 0;
3189 1.12 joerg if (wr->wr_inodes != NULL) {
3190 1.18 yamt wapbl_free(wr->wr_inodes, oldsize);
3191 1.12 joerg wr->wr_inodes = NULL;
3192 1.12 joerg }
3193 1.10 joerg }
3194 1.10 joerg wr->wr_inodeshead = newoff;
3195 1.10 joerg if (wc->wc_inocnt == 0)
3196 1.10 joerg return;
3197 1.10 joerg
3198 1.51 para new_inodes = wapbl_alloc((wr->wr_inodescnt + wc->wc_inocnt) *
3199 1.18 yamt sizeof(wr->wr_inodes[0]));
3200 1.18 yamt if (wr->wr_inodes != NULL) {
3201 1.18 yamt memcpy(new_inodes, wr->wr_inodes, oldsize);
3202 1.18 yamt wapbl_free(wr->wr_inodes, oldsize);
3203 1.18 yamt }
3204 1.18 yamt wr->wr_inodes = new_inodes;
3205 1.10 joerg memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
3206 1.18 yamt wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
3207 1.10 joerg wr->wr_inodescnt += wc->wc_inocnt;
3208 1.10 joerg }
3209 1.10 joerg
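/*
 * wapbl_replay_process(wr, head, tail)
 *
 * Walk the log from tail to head, dispatching each record to the
 * handler for its type, to accumulate the blocks, revocations, and
 * inodes of the pending transactions.
 */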
3210 1.2 simonb static int
3211 1.14 joerg wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
3212 1.2 simonb {
3213 1.2 simonb off_t off;
3214 1.2 simonb int error;
3215 1.2 simonb
3216 1.14 joerg int logblklen = 1 << wr->wr_log_dev_bshift;
3217 1.2 simonb
3218 1.2 simonb wapbl_blkhash_clear(wr);
3219 1.2 simonb
3220 1.14 joerg off = tail;
3221 1.14 joerg while (off != head) {
3222 1.2 simonb struct wapbl_wc_null *wcn;
3223 1.2 simonb off_t saveoff = off;
3224 1.2 simonb error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3225 1.2 simonb if (error)
3226 1.2 simonb goto errout;
3227 1.2 simonb wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3228 1.2 simonb switch (wcn->wc_type) {
3229 1.2 simonb case WAPBL_WC_BLOCKS:
3230 1.10 joerg wapbl_replay_process_blocks(wr, &off);
3231 1.2 simonb break;
3232 1.2 simonb
3233 1.2 simonb case WAPBL_WC_REVOCATIONS:
3234 1.10 joerg wapbl_replay_process_revocations(wr);
3235 1.2 simonb break;
3236 1.2 simonb
3237 1.2 simonb case WAPBL_WC_INODES:
3238 1.10 joerg wapbl_replay_process_inodes(wr, saveoff, off);
3239 1.2 simonb break;
3240 1.10 joerg
3241 1.2 simonb default:
3242 1.2 simonb printf("Unrecognized wapbl type: 0x%08x\n",
3243 1.114 riastrad wcn->wc_type);
3244 1.116 riastrad error = SET_ERROR(EFTYPE);
3245 1.2 simonb goto errout;
3246 1.2 simonb }
3247 1.2 simonb wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3248 1.2 simonb if (off != saveoff) {
3249 1.2 simonb printf("wapbl_replay: corrupted records\n");
3250 1.116 riastrad error = SET_ERROR(EFTYPE);
3251 1.2 simonb goto errout;
3252 1.2 simonb }
3253 1.2 simonb }
3254 1.2 simonb return 0;
3255 1.2 simonb
3256 1.114 riastrad errout:
3257 1.2 simonb wapbl_blkhash_clear(wr);
3258 1.2 simonb return error;
3259 1.2 simonb }
3260 1.2 simonb
3261 1.13 joerg #if 0
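/*
 * wapbl_replay_verify(wr, fsdevvp)
 *
 * Debug-only helper, currently compiled out: compare each block
 * recorded in the log against its copy on the file system device and
 * report any mismatches.
 */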
3262 1.2 simonb int
3263 1.2 simonb wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
3264 1.2 simonb {
3265 1.2 simonb off_t off;
3266 1.2 simonb int mismatchcnt = 0;
3267 1.14 joerg int logblklen = 1 << wr->wr_log_dev_bshift;
3268 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3269 1.51 para void *scratch1 = wapbl_alloc(MAXBSIZE);
3270 1.51 para void *scratch2 = wapbl_alloc(MAXBSIZE);
3271 1.2 simonb int error = 0;
3272 1.2 simonb
3273 1.2 simonb KDASSERT(wapbl_replay_isopen(wr));
3274 1.2 simonb
3275 1.2 simonb off = wch->wc_tail;
3276 1.2 simonb while (off != wch->wc_head) {
3277 1.2 simonb struct wapbl_wc_null *wcn;
3278 1.2 simonb #ifdef DEBUG
3279 1.2 simonb off_t saveoff = off;
3280 1.2 simonb #endif
3281 1.2 simonb error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3282 1.2 simonb if (error)
3283 1.2 simonb goto out;
3284 1.2 simonb wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3285 1.2 simonb switch (wcn->wc_type) {
3286 1.114 riastrad case WAPBL_WC_BLOCKS: {
3287 1.114 riastrad struct wapbl_wc_blocklist *wc =
3288 1.114 riastrad (struct wapbl_wc_blocklist *)wr->wr_scratch;
3289 1.114 riastrad int i;
3290 1.114 riastrad for (i = 0; i < wc->wc_blkcount; i++) {
3291 1.114 riastrad int foundcnt = 0;
3292 1.114 riastrad int dirtycnt = 0;
3293 1.114 riastrad int j, n;
3294 1.114 riastrad /*
3295 1.114 riastrad  * Check each physical block against the
3296 1.114 riastrad  * hashtable independently.
3297 1.114 riastrad */
3298 1.114 riastrad n = wc->wc_blocks[i].wc_dlen >>
3299 1.114 riastrad wch->wc_fs_dev_bshift;
3300 1.114 riastrad for (j = 0; j < n; j++) {
3301 1.114 riastrad struct wapbl_blk *wb =
3302 1.114 riastrad wapbl_blkhash_get(wr,
3303 1.114 riastrad wapbl_block_daddr(wc, i, j,
3304 1.114 riastrad fsblklen));
3305 1.114 riastrad if (wb && wb->wb_off == off) {
3306 1.114 riastrad foundcnt++;
3307 1.114 riastrad error =
3308 1.114 riastrad wapbl_circ_read(wr,
3309 1.114 riastrad scratch1, fsblklen,
3310 1.114 riastrad &off);
3311 1.114 riastrad if (error)
3312 1.114 riastrad goto out;
3313 1.114 riastrad error =
3314 1.114 riastrad wapbl_read(scratch2,
3315 1.114 riastrad fsblklen, fsdevvp,
3316 1.114 riastrad wb->wb_blk);
3317 1.114 riastrad if (error)
3318 1.114 riastrad goto out;
3319 1.114 riastrad if (memcmp(scratch1,
3320 1.114 riastrad scratch2,
3321 1.114 riastrad fsblklen)) {
3322 1.114 riastrad printf("wapbl_verify:"
3323 1.114 riastrad " mismatch block"
3324 1.114 riastrad " %"PRId64
3325 1.114 riastrad " at off"
3326 1.114 riastrad " %"PRIdMAX"\n",
3327 1.114 riastrad wb->wb_blk,
3328 1.114 riastrad (intmax_t)off);
3329 1.114 riastrad dirtycnt++;
3330 1.114 riastrad mismatchcnt++;
3331 1.114 riastrad }
3332 1.114 riastrad } else {
3333 1.114 riastrad wapbl_circ_advance(wr,
3334 1.114 riastrad fsblklen, &off);
3335 1.114 riastrad }
3336 1.114 riastrad }
3337 1.114 riastrad #if 0
3338 1.114 riastrad /*
3339 1.114 riastrad * If all of the blocks in an entry
3340 1.114 riastrad * are clean, then remove all of its
3341 1.114 riastrad * blocks from the hashtable since they
3342 1.114 riastrad * never will need replay.
3343 1.114 riastrad */
3344 1.114 riastrad if (foundcnt != 0 && dirtycnt == 0) {
3345 1.114 riastrad off = saveoff;
3346 1.114 riastrad wapbl_circ_advance(wr, logblklen,
3347 1.114 riastrad &off);
3348 1.2 simonb for (j = 0; j < n; j++) {
3349 1.2 simonb struct wapbl_blk *wb =
3350 1.114 riastrad wapbl_blkhash_get(wr,
3351 1.114 riastrad wapbl_block_daddr(wc,
3352 1.114 riastrad i, j, fsblklen));
3353 1.114 riastrad if (wb &&
3354 1.114 riastrad (wb->wb_off == off)) {
3355 1.114 riastrad wapbl_blkhash_rem(wr,
3356 1.2 simonb wb->wb_blk);
3357 1.2 simonb }
3358 1.2 simonb wapbl_circ_advance(wr,
3359 1.114 riastrad fsblklen, &off);
3360 1.2 simonb }
3361 1.114 riastrad }
3362 1.2 simonb #endif
3363 1.2 simonb }
3364 1.114 riastrad }
3365 1.2 simonb break;
3366 1.2 simonb case WAPBL_WC_REVOCATIONS:
3367 1.2 simonb case WAPBL_WC_INODES:
3368 1.2 simonb break;
3369 1.2 simonb default:
3370 1.2 simonb KASSERT(0);
3371 1.2 simonb }
3372 1.2 simonb #ifdef DEBUG
3373 1.2 simonb wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3374 1.2 simonb KASSERT(off == saveoff);
3375 1.2 simonb #endif
3376 1.2 simonb }
3377 1.114 riastrad out:
3378 1.18 yamt wapbl_free(scratch1, MAXBSIZE);
3379 1.18 yamt wapbl_free(scratch2, MAXBSIZE);
3380 1.2 simonb if (!error && mismatchcnt)
3381 1.116 riastrad error = SET_ERROR(EFTYPE);
3382 1.2 simonb return error;
3383 1.2 simonb }
3384 1.2 simonb #endif
3385 1.2 simonb
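/*
 * wapbl_replay_write(wr, fsdevvp)
 *
 * Write every block recorded in the block hash table from its
 * position in the log to its home location on fsdevvp.
 */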
3386 1.2 simonb int
3387 1.2 simonb wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
3388 1.2 simonb {
3389 1.9 joerg struct wapbl_blk *wb;
3390 1.9 joerg size_t i;
3391 1.2 simonb off_t off;
3392 1.9 joerg void *scratch;
3393 1.2 simonb int error = 0;
3394 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3395 1.2 simonb
3396 1.2 simonb KDASSERT(wapbl_replay_isopen(wr));
3397 1.2 simonb
3398 1.51 para scratch = wapbl_alloc(MAXBSIZE);
3399 1.2 simonb
3400 1.37 drochner for (i = 0; i <= wr->wr_blkhashmask; ++i) {
3401 1.9 joerg LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
3402 1.9 joerg off = wb->wb_off;
3403 1.9 joerg error = wapbl_circ_read(wr, scratch, fsblklen, &off);
3404 1.9 joerg if (error)
3405 1.9 joerg break;
3406 1.9 joerg error = wapbl_write(scratch, fsblklen, fsdevvp,
3407 1.9 joerg wb->wb_blk);
3408 1.9 joerg if (error)
3409 1.9 joerg break;
3410 1.2 simonb }
3411 1.2 simonb }
3412 1.9 joerg
3413 1.18 yamt wapbl_free(scratch, MAXBSIZE);
3414 1.2 simonb return error;
3415 1.2 simonb }
3416 1.2 simonb
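/*
 * wapbl_replay_can_read(wr, blk, len)
 *
 * Return nonzero if the log holds a newer copy of any block in the
 * len bytes starting at block blk, i.e. if wapbl_replay_read would
 * supply data for that range.
 */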
3417 1.2 simonb int
3418 1.6 joerg wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
3419 1.6 joerg {
3420 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3421 1.6 joerg
3422 1.6 joerg KDASSERT(wapbl_replay_isopen(wr));
3423 1.6 joerg KASSERT((len % fsblklen) == 0);
3424 1.6 joerg
3425 1.6 joerg while (len != 0) {
3426 1.6 joerg struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3427 1.6 joerg if (wb)
3428 1.6 joerg return 1;
3429 1.6 joerg len -= fsblklen;
blk++;
3430 1.6 joerg }
3431 1.6 joerg return 0;
3432 1.6 joerg }
3433 1.6 joerg
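/*
 * wapbl_replay_read(wr, data, blk, len)
 *
 * Copy len bytes starting at block blk into data, overwriting each
 * block for which the log holds a newer copy and leaving the rest of
 * the caller's buffer untouched.
 */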
3434 1.6 joerg int
3435 1.2 simonb wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
3436 1.2 simonb {
3437 1.14 joerg int fsblklen = 1 << wr->wr_fs_dev_bshift;
3438 1.2 simonb
3439 1.2 simonb KDASSERT(wapbl_replay_isopen(wr));
3440 1.2 simonb
3441 1.2 simonb KASSERT((len % fsblklen) == 0);
3442 1.2 simonb
3443 1.2 simonb while (len != 0) {
3444 1.2 simonb struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3445 1.2 simonb if (wb) {
3446 1.2 simonb off_t off = wb->wb_off;
3447 1.2 simonb int error;
3448 1.2 simonb error = wapbl_circ_read(wr, data, fsblklen, &off);
3449 1.2 simonb if (error)
3450 1.2 simonb return error;
3451 1.2 simonb }
3452 1.2 simonb data = (uint8_t *)data + fsblklen;
3453 1.2 simonb len -= fsblklen;
3454 1.2 simonb blk++;
3455 1.2 simonb }
3456 1.2 simonb return 0;
3457 1.2 simonb }
3458 1.35 pooka
3459 1.36 pooka #ifdef _KERNEL
3460 1.64 pgoyette
3461 1.35 pooka MODULE(MODULE_CLASS_VFS, wapbl, NULL);
3462 1.35 pooka
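/*
 * wapbl_modcmd(cmd, arg)
 *
 * Module control hook: initialize wapbl on load and tear it down on
 * unload.
 */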
3463 1.35 pooka static int
3464 1.35 pooka wapbl_modcmd(modcmd_t cmd, void *arg)
3465 1.35 pooka {
3466 1.35 pooka
3467 1.35 pooka switch (cmd) {
3468 1.35 pooka case MODULE_CMD_INIT:
3469 1.39 christos wapbl_init();
3470 1.35 pooka return 0;
3471 1.35 pooka case MODULE_CMD_FINI:
3472 1.74 riastrad return wapbl_fini();
3473 1.35 pooka default:
3474 1.116 riastrad return SET_ERROR(ENOTTY);
3475 1.35 pooka }
3476 1.35 pooka }
3477 1.114 riastrad
3478 1.36 pooka #endif /* _KERNEL */
3479