/*	$NetBSD: lfs_bio.c,v 1.6 1998/03/01 02:23:24 fvdl Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 */
int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */
int	locked_queue_count;		/* XXX Count of locked-down buffers. */
int	lfs_writing;			/* Set if already kicked off a writer
					   because of buffer space */
/*
#define WRITE_THRESHHOLD	((nbuf >> 2) - 10)
#define WAIT_THRESHHOLD		((nbuf >> 1) - 10)
*/
#define WAIT_THRESHHOLD		(nbuf - (nbuf >> 2) - 10)
#define WRITE_THRESHHOLD	((nbuf >> 1) - 10)
#define LFS_BUFWAIT		2
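
/*
 * Illustrative note (not part of the original source): nbuf is the
 * kernel's total number of buffer headers, so with a purely hypothetical
 * nbuf of 1024 the thresholds above work out to
 *
 *	WRITE_THRESHHOLD = (1024 >> 1) - 10          = 502 buffers
 *	WAIT_THRESHHOLD  = 1024 - (1024 >> 2) - 10   = 758 buffers
 *
 * i.e. lfs_check() below kicks off a segment write once roughly half of
 * the buffer cache is locked down, and blocks callers once roughly three
 * quarters of it is.  LFS_BUFWAIT is in seconds; it is multiplied by hz
 * when used as the tsleep() timeout in lfs_check().
 */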

int
lfs_bwrite(v)
	void *v;
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	register struct buf *bp = ap->a_bp;
	struct lfs *fs;
	struct inode *ip;
	int db, error, s;

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
		db = fragstodb(fs, numfrags(fs, bp->b_bcount));
		while (!LFS_FITS(fs, db) && !IS_IFILE(bp) &&
		    bp->b_lblkno > 0) {
			/* Out of space, need cleaner to run */
			wakeup(&lfs_allclean_wakeup);
			wakeup(&fs->lfs_nextseg);
			error = tsleep(&fs->lfs_avail, PCATCH | PUSER,
			    "cleaner", NULL);
			if (error) {
				brelse(bp);
				return (error);
			}
		}
		ip = VTOI((bp)->b_vp);
		if (!(ip->i_flag & IN_MODIFIED))
			++fs->lfs_uinodes;
		ip->i_flag |= IN_CHANGE | IN_MODIFIED | IN_UPDATE;
		fs->lfs_avail -= db;
		++locked_queue_count;
		bp->b_flags |= B_DELWRI | B_LOCKED;
		bp->b_flags &= ~(B_READ | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}
	brelse(bp);
	return (0);
}
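
/*
 * Illustrative note (not part of the original source): lfs_bwrite() is
 * normally not called by name; it is installed in the LFS vnode operation
 * vector and reached through the generic bwrite vnode op, roughly:
 *
 *	struct buf *bp;
 *	...
 *	error = VOP_BWRITE(bp);
 *
 * where VOP_BWRITE() packs bp into the struct vop_bwrite_args shown above
 * and dispatches to lfs_bwrite() for buffers belonging to LFS vnodes.
 * This sketch assumes the standard 4.4BSD-style vnode-interface macros.
 */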

/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note, we have one static count of locked
 * buffers, so we can't have more than a single file system.  To make this
 * work for multiple file systems, put the count into the mount structure.
 */
void
lfs_flush()
{
	register struct mount *mp, *nmp;

#ifdef DOSTATS
	++lfs_stats.write_exceeded;
#endif
	if (lfs_writing)
		return;
	lfs_writing = 1;
	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		if (!strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS, MFSNAMELEN) &&
		    (mp->mnt_flag & MNT_RDONLY) == 0 &&
		    !((((struct ufsmount *)mp->mnt_data))->ufsmount_u.lfs)->lfs_dirops ) {
			/*
			 * We set the queue to 0 here because we are about to
			 * write all the dirty buffers we have.  If more come
			 * in while we're writing the segment, they may not
			 * get written, so we want the count to reflect these
			 * new writes after the segwrite completes.
			 */
#ifdef DOSTATS
			++lfs_stats.flush_invoked;
#endif
			lfs_segwrite(mp, 0);
		}
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);
	lfs_writing = 0;
}
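
/*
 * Illustrative sketch (not part of the original source): the XXX comment
 * above lfs_flush() notes that the single static locked_queue_count limits
 * LFS to one mounted file system and suggests moving the count into the
 * per-mount structure.  One hypothetical shape of that change (the
 * lfs_locked_count field is an assumption, not an existing member of
 * struct lfs):
 *
 *	fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
 *	++fs->lfs_locked_count;		(instead of ++locked_queue_count)
 *
 * with lfs_flush() and lfs_check() then consulting the count of the file
 * system they are operating on rather than the global.
 */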

int
lfs_check(vp, blkno)
	struct vnode *vp;
	ufs_daddr_t blkno;
{
	int error;

	error = 0;
	if (incore(vp, blkno))
		return (0);
	if (locked_queue_count > WRITE_THRESHHOLD)
		lfs_flush();

	/* If out of buffers, wait on writer */
	while (locked_queue_count > WAIT_THRESHHOLD) {
#ifdef DOSTATS
		++lfs_stats.wait_exceeded;
#endif
		error = tsleep(&locked_queue_count, PCATCH | PUSER, "buffers",
		    hz * LFS_BUFWAIT);
	}

	return (error);
}
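
/*
 * Illustrative note (not part of the original source): the tsleep() above
 * waits on &locked_queue_count for at most hz * LFS_BUFWAIT ticks, i.e.
 * about two seconds with LFS_BUFWAIT defined as 2.  The expectation is
 * that the segment writer issues a wakeup() on &locked_queue_count as it
 * releases locked buffers; failing that, the sleep times out and the
 * count is simply re-checked.
 */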