/*	$NetBSD: lfs_bio.c,v 1.103.10.2 2007/07/29 13:31:15 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.103.10.2 2007/07/29 13:31:15 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * protected by lfs_subsys_lock.
 */
int	locked_queue_count   = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages     = 0L;	/* Total number LFS-written pages */
int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
int	lfs_writing	     = 0;	/* Set if already kicked off a writer
					   because of buffer space */
/* Lock for aboves */
struct simplelock lfs_subsys_lock = SIMPLELOCK_INITIALIZER;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

int lfs_fits_buf(struct lfs *, int, int);
int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2, int);

int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n < LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes < LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}
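
/*
 * Illustrative sketch (not part of the original source): a caller that
 * holds lfs_subsys_lock can use lfs_fits_buf() to decide whether a
 * prospective reservation of n buffers / bytes bytes would stay below
 * the LFS_WAIT_BUFS / LFS_WAIT_BYTES thresholds, e.g.:
 *
 *	simple_lock(&lfs_subsys_lock);
 *	if (!lfs_fits_buf(fs, n, bytes))
 *		lfs_flush(fs, 0, 0);	// ask the writer to free space
 *	simple_unlock(&lfs_subsys_lock);
 *
 * lfs_reservebuf() below wraps exactly this check-and-flush loop.
 */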

/* ARGSUSED */
int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	simple_lock(&lfs_subsys_lock);
	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		error = ltsleep(&locked_queue_count, PCATCH | PUSER,
		    "lfsresbuf", hz * LFS_BUFWAIT, &lfs_subsys_lock);
		if (error && error != EWOULDBLOCK) {
			simple_unlock(&lfs_subsys_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	simple_unlock(&lfs_subsys_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}
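
/*
 * Note (illustrative, not from the original source): a reservation made
 * with positive n/bytes is expected to be returned later by a second call
 * with the same magnitudes negated, for example:
 *
 *	error = lfs_reservebuf(fs, vp, NULL, 2, 2 * fs->lfs_bsize);
 *	...
 *	lfs_reservebuf(fs, vp, NULL, -2, -2 * fs->lfs_bsize);
 *
 * The release path never sleeps, because the while loop above only runs
 * for n > 0.
 */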

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	simple_lock(&fs->lfs_interlock);
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		simple_unlock(&fs->lfs_interlock);
#if 0
		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep for a very long time.
		 */
		VOP_UNLOCK(vp, 0);
		if (vp2 != NULL) {
			VOP_UNLOCK(vp2, 0);
		}
#else
		/*
		 * XXX since we'll sleep for the cleaner while holding the
		 * vnode lock, deadlock will occur if the cleaner tries to
		 * lock the vnode.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		simple_lock(&fs->lfs_interlock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = ltsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
		    0, &fs->lfs_interlock);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
#endif
		if (error) {
			simple_unlock(&fs->lfs_interlock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	simple_unlock(&fs->lfs_interlock);

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		simple_lock(&fs->lfs_interlock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			ltsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &fs->lfs_interlock);
		}
		simple_unlock(&fs->lfs_interlock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	lfs_vref(vp);
	if (vp2 != NULL) {
		lfs_vref(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2,
	    fragstoblks(fs, fsb), fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	lfs_vunref(vp);
	if (vp2 != NULL) {
		lfs_vunref(vp2);
	}

	return error;
}
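
/*
 * Illustrative usage sketch (not from the original source): callers bracket
 * an operation that may dirty metadata with a matching reserve/release
 * pair, where nfsb is the caller's own estimate of the number of
 * filesystem blocks (fsb units) it might dirty:
 *
 *	if ((error = lfs_reserve(fs, vp, NULL, nfsb)) != 0)
 *		return error;
 *	... operation that dirties up to nfsb blocks ...
 *	lfs_reserve(fs, vp, NULL, -nfsb);
 *
 * The negative call releases exactly what was reserved; per the KASSERTs
 * above, vp need not be locked for the release.
 */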

int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
	    ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
	      1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}
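
/*
 * Illustrative worked example (not from the original source), assuming an
 * 8192-byte block / 1024-byte fragment filesystem (so the shift by
 * blktodb - fsbtodb converts blocks into 8 frags), a one-fragment summary
 * block, 10 dirty inodes (INOPB = 64) and a two-block segment usage table:
 *
 *	needed = fsb + 1				 // summary block
 *	       + ((howmany(10 + 1, 64) + 2 + 1) << 3)	 // 4 blocks = 32 frags
 *	       = fsb + 33 fragments
 *
 * which must be strictly less than lfs_avail for the write to "fit".
 */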

int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	simple_lock(&fs->lfs_interlock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		simple_unlock(&fs->lfs_interlock);
		return 0;
	}
	simple_unlock(&fs->lfs_interlock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		lfs_wakeup_cleaner(fs);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}
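
/*
 * Illustrative note (not from the original source): lfs_fits() is the
 * non-blocking check, lfs_availwait() the blocking form.  A caller about
 * to dirty a buffer of `size` bytes would typically do something like
 *
 *	fsb = fragstofsb(fs, numfrags(fs, size));
 *	if ((error = lfs_availwait(fs, fsb)) != 0)
 *		return error;	// interrupted while waiting on the cleaner
 *
 * before handing the buffer to lfs_bwrite_ext() below.
 */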

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	int fsb, s;

	fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_flags & B_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_DELWRI);
	KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_LOCKED);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_flags &= ~(B_DELWRI | B_READ);
		bp->b_error = 0;
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_flags &= ~B_BUSY;
		else
			brelse(bp);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));

		ip = VTOI(bp->b_vp);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		fs->lfs_avail -= fsb;
		bp->b_flags |= B_DELWRI;

		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~(B_READ | B_DONE);
		bp->b_error = 0;
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	if (bp->b_flags & B_CALL)
		bp->b_flags &= ~B_BUSY;
	else
		brelse(bp);

	return (0);
}

/*
 * Called and returns with the lfs_interlock held, but no other simple_locks
 * held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(simple_lock_held(&fs->lfs_interlock));
	LOCK_ASSERT(!simple_lock_held(&lfs_subsys_lock));
	if (fs->lfs_ronly)
		return;

	simple_lock(&lfs_subsys_lock);
	if (lfs_dostats)
		++lfs_stats.flush_invoked;
	simple_unlock(&lfs_subsys_lock);

	simple_unlock(&fs->lfs_interlock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	simple_lock(&fs->lfs_interlock);
	fs->lfs_favail = 0; /* XXX */
}

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and return with lfs_subsys_lock held.
 * If fs != NULL, we hold the segment lock for fs.
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		ltsleep(&lfs_writing, PRIBIO + 1, "lfsflush", 0,
		    &lfs_subsys_lock);
	lfs_writing = 1;

	simple_unlock(&lfs_subsys_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, LK_NOWAIT,
			     &mountlist_slock))
			goto errout;
		simple_lock(&fs->lfs_interlock);
		lfs_flush_fs(fs, flags);
		simple_unlock(&fs->lfs_interlock);
		vfs_unbusy(fs->lfs_ivnode->v_mount);
	} else {
		locked_fakequeue_count = 0;
		simple_lock(&mountlist_slock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				nmp = CIRCLEQ_NEXT(mp, mnt_list);
				continue;
			}
			if (strncmp(mp->mnt_stat.f_fstypename, MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				tfs = VFSTOUFS(mp)->um_lfs;
				simple_lock(&tfs->lfs_interlock);
				lfs_flush_fs(tfs, flags);
				simple_unlock(&tfs->lfs_interlock);
			}
			simple_lock(&mountlist_slock);
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		simple_unlock(&mountlist_slock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

 errout:
	simple_lock(&lfs_subsys_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))
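
/*
 * Illustrative note (not from the original source): INOCOUNT/INOBYTES
 * estimate the buffer-cache cost of the dirty inodes that will have to be
 * written along with the data.  For example, with 8192-byte blocks and
 * 128-byte ufs1 dinodes (INOPB = 64), a filesystem with lfs_uinodes == 100
 * contributes
 *
 *	INOCOUNT(fs) = howmany(100, 64) = 2 buffers
 *	INOBYTES(fs) = 100 * 128        = 12800 bytes
 *
 * to the locked-buffer totals that lfs_check() compares against
 * LFS_MAX_BUFS / LFS_MAX_BYTES below.
 */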

/*
 * make sure that we don't have too many locked buffers.
 * flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(!simple_lock_held(&fs->lfs_interlock));

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	simple_lock(&fs->lfs_interlock);
	simple_lock(&lfs_subsys_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		simple_unlock(&lfs_subsys_lock);
		++fs->lfs_diropwait;
		ltsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
		    &fs->lfs_interlock);
		--fs->lfs_diropwait;
		simple_lock(&lfs_subsys_lock);
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		flags |= SEGM_CKP;
	}

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		simple_unlock(&fs->lfs_interlock);
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
		simple_unlock(&fs->lfs_interlock);
	} else
		simple_unlock(&fs->lfs_interlock);

	while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS ||
	       locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
	       lfs_subsys_pages > LFS_WAIT_PAGES ||
	       fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	       lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		error = ltsleep(&locked_queue_count, PCATCH | PUSER,
		    "buffers", hz * LFS_BUFWAIT, &lfs_subsys_lock);
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	simple_unlock(&lfs_subsys_lock);
	return (error);
}
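
/*
 * Illustrative summary (not from the original source): lfs_check() uses two
 * tiers of thresholds.  Crossing the LFS_MAX_* limits starts a flush via
 * lfs_flush(); crossing the larger LFS_WAIT_* limits additionally puts the
 * caller to sleep on locked_queue_count until the writer has caught up.
 * Conceptually:
 *
 *	if (usage > MAX)	start a segment write
 *	while (usage > WAIT)	sleep, re-flushing with SEGM_CKP if needed
 */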

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;
	int s;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, fsbtob(fs, 1));

	bp = getiobuf();
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif
	bp->b_vp = NULL;
	s = splbio();
	bgetvp(vp, bp);
	splx(s);

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_flags = B_BUSY | B_CALL | B_NOCACHE;
	bp->b_private = fs;

	return (bp);
}

void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	int s;

	s = splbio();
	if (bp->b_vp)
		brelvp(bp);
	if (!(bp->b_flags & B_INVAL)) { /* B_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	splx(s);
	putiobuf(bp);
}
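
/*
 * Illustrative usage sketch (not from the original source): segment-writer
 * code allocates these headers for blocks it fabricates itself (summary
 * blocks, cleaner blocks, and so on) and releases them again from the I/O
 * completion path, roughly:
 *
 *	bp = lfs_newbuf(fs, vp, daddr, fs->lfs_sumsize, LFS_NB_SUMMARY);
 *	... fill bp->b_data and queue it for the segment write ...
 *	lfs_freebuf(fs, bp);		// typically from lfs_callback()
 *
 * LFS_NB_SUMMARY here stands for one of the lfs_malloc() type tags; the
 * exact tag and call site are examples only.
 */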

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4		/* number of free buffer queues */

#define BQ_LOCKED	0		/* super-blocks &c */
#define BQ_LRU		1		/* lru, useful buffers */
#define BQ_AGE		2		/* rubbish */
#define BQ_EMPTY	3		/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
extern struct simplelock bqueue_slock;

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist) {
		KASSERT(!(bp->b_flags & B_CALL));
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	simple_unlock(&bqueue_slock);
	splx(s);
	return;
}
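
/*
 * Illustrative note (not from the original source): this is normally
 * reached through the LFS_DEBUG_COUNTLOCKED() macro seen in lfs_flush()
 * above, which (in DEBUG kernels) expands to something along the lines of
 *
 *	lfs_countlocked(&locked_queue_count, &locked_queue_bytes, "flush");
 *
 * i.e. it re-derives the global accounting from the BQ_LOCKED queue and
 * logs a message if the cached counters had drifted.
 */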

int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}