1 1.161 dholland /*	$NetBSD: lfs_syscalls.c,v 1.161 2015/07/24 06:56:42 dholland Exp $	*/
2 1.3 cgd
3 1.1 mycroft /*-
4 1.129 ad  * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007, 2008
5 1.128 ad * The NetBSD Foundation, Inc.
6 1.22 perseant * All rights reserved.
7 1.22 perseant *
8 1.22 perseant * This code is derived from software contributed to The NetBSD Foundation
9 1.22 perseant * by Konrad E. Schroder <perseant (at) hhhh.org>.
10 1.22 perseant *
11 1.22 perseant * Redistribution and use in source and binary forms, with or without
12 1.22 perseant * modification, are permitted provided that the following conditions
13 1.22 perseant * are met:
14 1.22 perseant * 1. Redistributions of source code must retain the above copyright
15 1.22 perseant * notice, this list of conditions and the following disclaimer.
16 1.22 perseant * 2. Redistributions in binary form must reproduce the above copyright
17 1.22 perseant * notice, this list of conditions and the following disclaimer in the
18 1.22 perseant * documentation and/or other materials provided with the distribution.
19 1.22 perseant *
20 1.22 perseant * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.22 perseant * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.22 perseant * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.22 perseant * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.22 perseant * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.22 perseant * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.22 perseant * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.22 perseant * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.22 perseant * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.22 perseant * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.22 perseant * POSSIBILITY OF SUCH DAMAGE.
31 1.22 perseant */
32 1.22 perseant /*-
33 1.1 mycroft * Copyright (c) 1991, 1993, 1994
34 1.1 mycroft * The Regents of the University of California. All rights reserved.
35 1.1 mycroft *
36 1.1 mycroft * Redistribution and use in source and binary forms, with or without
37 1.1 mycroft * modification, are permitted provided that the following conditions
38 1.1 mycroft * are met:
39 1.1 mycroft * 1. Redistributions of source code must retain the above copyright
40 1.1 mycroft * notice, this list of conditions and the following disclaimer.
41 1.1 mycroft * 2. Redistributions in binary form must reproduce the above copyright
42 1.1 mycroft * notice, this list of conditions and the following disclaimer in the
43 1.1 mycroft * documentation and/or other materials provided with the distribution.
44 1.97 agc * 3. Neither the name of the University nor the names of its contributors
45 1.1 mycroft * may be used to endorse or promote products derived from this software
46 1.1 mycroft * without specific prior written permission.
47 1.1 mycroft *
48 1.1 mycroft * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 1.1 mycroft * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 1.1 mycroft * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 1.1 mycroft * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 1.1 mycroft * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 1.1 mycroft * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 1.1 mycroft * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 1.1 mycroft * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 1.1 mycroft * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 1.1 mycroft * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 1.1 mycroft * SUCH DAMAGE.
59 1.1 mycroft *
60 1.16 fvdl * @(#)lfs_syscalls.c 8.10 (Berkeley) 5/14/95
61 1.1 mycroft */
62 1.61 lukem
63 1.61 lukem #include <sys/cdefs.h>
64 1.161 dholland __KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.161 2015/07/24 06:56:42 dholland Exp $");
65 1.15 thorpej
66 1.86 perseant #ifndef LFS
67 1.86 perseant # define LFS /* for prototypes in syscallargs.h */
68 1.86 perseant #endif
69 1.1 mycroft
70 1.1 mycroft #include <sys/param.h>
71 1.5 cgd #include <sys/systm.h>
72 1.1 mycroft #include <sys/proc.h>
73 1.1 mycroft #include <sys/buf.h>
74 1.1 mycroft #include <sys/mount.h>
75 1.1 mycroft #include <sys/vnode.h>
76 1.1 mycroft #include <sys/kernel.h>
77 1.113 elad #include <sys/kauth.h>
78 1.5 cgd #include <sys/syscallargs.h>
79 1.5 cgd
80 1.145 dholland #include <ufs/lfs/ulfs_inode.h>
81 1.145 dholland #include <ufs/lfs/ulfsmount.h>
82 1.145 dholland #include <ufs/lfs/ulfs_extern.h>
83 1.1 mycroft
84 1.1 mycroft #include <ufs/lfs/lfs.h>
85 1.148 dholland #include <ufs/lfs/lfs_kernel.h>
86 1.1 mycroft #include <ufs/lfs/lfs_extern.h>
87 1.10 christos
88 1.160 hannken static int lfs_fastvget(struct mount *, ino_t, BLOCK_INFO *, int,
89 1.160 hannken struct vnode **);
90 1.122 christos struct buf *lfs_fakebuf(struct lfs *, struct vnode *, int, size_t, void *);
91 1.80 perseant
92 1.1 mycroft /*
93 1.31 christos * sys_lfs_markv:
94 1.1 mycroft *
95 1.1 mycroft * This will mark inodes and blocks dirty, so they are written into the log.
96 1.1 mycroft * It will block until all the blocks have been written. The segment create
97 1.1 mycroft * time passed in the block_info and inode_info structures is used to decide
98 1.1 mycroft * if the data is valid for each block (in case some process dirtied a block
99 1.1 mycroft * or inode that is being cleaned between the determination that a block is
100 1.1 mycroft * live and the lfs_markv call).
101 1.1 mycroft *
102 1.1 mycroft * 0 on success
103 1.1 mycroft  * -1/errno is returned on error.
104 1.1 mycroft */
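/*
 * Illustrative only: a minimal sketch (not compiled as part of this file)
 * of how a userland cleaner might fill a block_info array and invoke the
 * markv syscall.  The userland prototype and the live[]/NBLKS bookkeeping
 * are assumptions made up for the example; the bi_* fields are the ones
 * copied in and out below.
 *
 *	fsid_t fsid;			// fsid of the mounted LFS
 *	struct block_info bi[NBLKS];
 *	int i, nblks = 0;
 *
 *	for (i = 0; i < nfound; i++, nblks++) {
 *		bi[nblks].bi_inode     = live[i].ino;
 *		bi[nblks].bi_lbn       = live[i].lbn;	// LFS_UNUSED_LBN if the
 *							// entry is the inode itself
 *		bi[nblks].bi_daddr     = live[i].daddr;	// where it was found
 *		bi[nblks].bi_segcreate = seg_create_time; // from the summary
 *		bi[nblks].bi_version   = live[i].version;
 *		bi[nblks].bi_bp        = live[i].data;	// userland copy of block
 *		bi[nblks].bi_size      = live[i].size;
 *	}
 *	if (lfs_markv(&fsid, bi, nblks) < 0 && errno == EAGAIN) {
 *		// some blocks could not be cleaned; retry the segment later
 *	}
 */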
105 1.57 perseant #ifdef USE_64BIT_SYSCALLS
106 1.1 mycroft int
107 1.125 dsl sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
108 1.9 thorpej {
109 1.125 dsl /* {
110 1.5 cgd syscallarg(fsid_t *) fsidp;
111 1.5 cgd syscallarg(struct block_info *) blkiov;
112 1.5 cgd syscallarg(int) blkcnt;
113 1.125 dsl } */
114 1.57 perseant BLOCK_INFO *blkiov;
115 1.57 perseant int blkcnt, error;
116 1.57 perseant fsid_t fsid;
117 1.105 perseant struct lfs *fs;
118 1.105 perseant struct mount *mntp;
119 1.57 perseant
120 1.142 elad error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
121 1.142 elad KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
122 1.142 elad if (error)
123 1.57 perseant return (error);
124 1.102 perry
125 1.57 perseant if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
126 1.57 perseant return (error);
127 1.57 perseant
128 1.105 perseant 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
129 1.105 perseant return (ENOENT);
130 1.146 dholland fs = VFSTOULFS(mntp)->um_lfs;
131 1.105 perseant
132 1.57 perseant blkcnt = SCARG(uap, blkcnt);
133 1.84 perseant if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
134 1.58 jdolecek return (EINVAL);
135 1.58 jdolecek
136 1.129 ad KERNEL_LOCK(1, NULL);
137 1.105 perseant blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
138 1.57 perseant if ((error = copyin(SCARG(uap, blkiov), blkiov,
139 1.57 perseant blkcnt * sizeof(BLOCK_INFO))) != 0)
140 1.57 perseant goto out;
141 1.57 perseant
142 1.57 perseant 	if ((error = lfs_markv(l->l_proc, &fsid, blkiov, blkcnt)) == 0)
143 1.57 perseant copyout(blkiov, SCARG(uap, blkiov),
144 1.57 perseant blkcnt * sizeof(BLOCK_INFO));
145 1.57 perseant out:
146 1.105 perseant lfs_free(fs, blkiov, LFS_NB_BLKIOV);
147 1.129 ad KERNEL_UNLOCK_ONE(NULL);
148 1.57 perseant return error;
149 1.57 perseant }
150 1.57 perseant #else
151 1.57 perseant int
152 1.125 dsl sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
153 1.57 perseant {
154 1.125 dsl /* {
155 1.57 perseant syscallarg(fsid_t *) fsidp;
156 1.57 perseant syscallarg(struct block_info *) blkiov;
157 1.57 perseant syscallarg(int) blkcnt;
158 1.125 dsl } */
159 1.57 perseant BLOCK_INFO *blkiov;
160 1.57 perseant BLOCK_INFO_15 *blkiov15;
161 1.57 perseant int i, blkcnt, error;
162 1.57 perseant fsid_t fsid;
163 1.105 perseant struct lfs *fs;
164 1.105 perseant struct mount *mntp;
165 1.57 perseant
166 1.142 elad error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
167 1.142 elad KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
168 1.142 elad if (error)
169 1.57 perseant return (error);
170 1.102 perry
171 1.57 perseant if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
172 1.57 perseant return (error);
173 1.57 perseant
174 1.105 perseant if ((mntp = vfs_getvfs(&fsid)) == NULL)
175 1.105 perseant return (ENOENT);
176 1.146 dholland fs = VFSTOULFS(mntp)->um_lfs;
177 1.105 perseant
178 1.57 perseant blkcnt = SCARG(uap, blkcnt);
179 1.84 perseant if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
180 1.58 jdolecek return (EINVAL);
181 1.58 jdolecek
182 1.129 ad KERNEL_LOCK(1, NULL);
183 1.105 perseant blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
184 1.105 perseant blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
185 1.57 perseant if ((error = copyin(SCARG(uap, blkiov), blkiov15,
186 1.57 perseant blkcnt * sizeof(BLOCK_INFO_15))) != 0)
187 1.57 perseant goto out;
188 1.57 perseant
189 1.57 perseant for (i = 0; i < blkcnt; i++) {
190 1.57 perseant blkiov[i].bi_inode = blkiov15[i].bi_inode;
191 1.57 perseant blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
192 1.57 perseant blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
193 1.57 perseant blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
194 1.57 perseant blkiov[i].bi_version = blkiov15[i].bi_version;
195 1.82 perseant blkiov[i].bi_bp = blkiov15[i].bi_bp;
196 1.57 perseant blkiov[i].bi_size = blkiov15[i].bi_size;
197 1.57 perseant }
198 1.57 perseant
199 1.115 ad if ((error = lfs_markv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
200 1.57 perseant for (i = 0; i < blkcnt; i++) {
201 1.82 perseant blkiov15[i].bi_inode = blkiov[i].bi_inode;
202 1.82 perseant blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
203 1.82 perseant blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
204 1.57 perseant blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
205 1.82 perseant blkiov15[i].bi_version = blkiov[i].bi_version;
206 1.82 perseant blkiov15[i].bi_bp = blkiov[i].bi_bp;
207 1.82 perseant blkiov15[i].bi_size = blkiov[i].bi_size;
208 1.57 perseant }
209 1.57 perseant copyout(blkiov15, SCARG(uap, blkiov),
210 1.57 perseant blkcnt * sizeof(BLOCK_INFO_15));
211 1.57 perseant }
212 1.57 perseant out:
213 1.105 perseant lfs_free(fs, blkiov, LFS_NB_BLKIOV);
214 1.105 perseant lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
215 1.129 ad KERNEL_UNLOCK_ONE(NULL);
216 1.57 perseant return error;
217 1.57 perseant }
218 1.57 perseant #endif
219 1.57 perseant
220 1.77 yamt #define LFS_MARKV_MAX_BLOCKS (LFS_MAX_BUFS)
221 1.77 yamt
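/*
 * Guts of the markv operation: under the segment lock, re-dirty the given
 * blocks (and the inodes that own them) and write them into new segments,
 * checkpointing at the end so the cleaned data is safely on disk before
 * the cleaner reuses the source segment.
 */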
222 1.84 perseant int
223 1.118 christos lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov,
224 1.117 christos int blkcnt)
225 1.57 perseant {
226 1.1 mycroft BLOCK_INFO *blkp;
227 1.1 mycroft IFILE *ifp;
228 1.96 yamt struct buf *bp;
229 1.10 christos struct inode *ip = NULL;
230 1.1 mycroft struct lfs *fs;
231 1.1 mycroft struct mount *mntp;
232 1.159 hannken struct ulfsmount *ump;
233 1.159 hannken struct vnode *vp;
234 1.1 mycroft ino_t lastino;
235 1.159 hannken daddr_t b_daddr;
236 1.74 yamt int cnt, error;
237 1.62 chs int do_again = 0;
238 1.74 yamt int numrefed = 0;
239 1.49 perseant ino_t maxino;
240 1.69 perseant size_t obsize;
241 1.1 mycroft
242 1.77 yamt /* number of blocks/inodes that we have already bwrite'ed */
243 1.77 yamt int nblkwritten, ninowritten;
244 1.77 yamt
245 1.57 perseant if ((mntp = vfs_getvfs(fsidp)) == NULL)
246 1.53 perseant return (ENOENT);
247 1.1 mycroft
248 1.159 hannken ump = VFSTOULFS(mntp);
249 1.159 hannken fs = ump->um_lfs;
250 1.96 yamt
251 1.96 yamt if (fs->lfs_ronly)
252 1.96 yamt return EROFS;
253 1.96 yamt
254 1.147 christos maxino = (lfs_fragstoblks(fs, VTOI(fs->lfs_ivnode)->i_ffs1_blocks) -
255 1.161 dholland lfs_sb_getcleansz(fs) - lfs_sb_getsegtabsz(fs)) * lfs_sb_getifpb(fs);
256 1.49 perseant
257 1.57 perseant cnt = blkcnt;
258 1.102 perry
259 1.132 ad if ((error = vfs_busy(mntp, NULL)) != 0)
260 1.53 perseant return (error);
261 1.53 perseant
262 1.22 perseant /*
263 1.22 perseant * This seglock is just to prevent the fact that we might have to sleep
264 1.22 perseant * from allowing the possibility that our blocks might become
265 1.22 perseant * invalid.
266 1.22 perseant *
267 1.22 perseant * It is also important to note here that unless we specify SEGM_CKP,
268 1.22 perseant * any Ifile blocks that we might be asked to clean will never get
269 1.22 perseant * to the disk.
270 1.22 perseant */
271 1.67 perseant lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);
272 1.102 perry
273 1.1 mycroft /* Mark blocks/inodes dirty. */
274 1.1 mycroft error = 0;
275 1.1 mycroft
276 1.22 perseant /* these were inside the initialization for the for loop */
277 1.159 hannken vp = NULL;
278 1.22 perseant lastino = LFS_UNUSED_INUM;
279 1.77 yamt nblkwritten = ninowritten = 0;
280 1.57 perseant for (blkp = blkiov; cnt--; ++blkp)
281 1.22 perseant {
282 1.49 perseant /* Bounds-check incoming data, avoid panic for failed VGET */
283 1.49 perseant if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
284 1.49 perseant error = EINVAL;
285 1.96 yamt goto err3;
286 1.49 perseant }
287 1.1 mycroft /*
288 1.1 mycroft * Get the IFILE entry (only once) and see if the file still
289 1.1 mycroft * exists.
290 1.1 mycroft */
291 1.1 mycroft if (lastino != blkp->bi_inode) {
292 1.22 perseant /*
293 1.159 hannken * Finish the old file, if there was one.
294 1.22 perseant */
295 1.159 hannken if (vp != NULL) {
296 1.160 hannken vput(vp);
297 1.159 hannken vp = NULL;
298 1.22 perseant numrefed--;
299 1.1 mycroft }
300 1.1 mycroft
301 1.22 perseant /*
302 1.22 perseant * Start a new file
303 1.22 perseant */
304 1.1 mycroft lastino = blkp->bi_inode;
305 1.1 mycroft
306 1.1 mycroft /* Get the vnode/inode. */
307 1.159 hannken error = lfs_fastvget(mntp, blkp->bi_inode, blkp,
308 1.159 hannken LK_EXCLUSIVE | LK_NOWAIT, &vp);
309 1.62 chs if (error) {
310 1.103 perseant DLOG((DLOG_CLEAN, "lfs_markv: lfs_fastvget"
311 1.103 perseant " failed with %d (ino %d, segment %d)\n",
312 1.103 perseant error, blkp->bi_inode,
313 1.147 christos lfs_dtosn(fs, blkp->bi_daddr)));
314 1.22 perseant /*
315 1.22 perseant * If we got EAGAIN, that means that the
316 1.22 perseant * Inode was locked. This is
317 1.22 perseant * recoverable: just clean the rest of
318 1.22 perseant * this segment, and let the cleaner try
319 1.82 perseant * again with another. (When the
320 1.22 perseant * cleaner runs again, this segment will
321 1.22 perseant * sort high on the list, since it is
322 1.159 hannken * now almost entirely empty.)
323 1.22 perseant */
324 1.62 chs if (error == EAGAIN) {
325 1.22 perseant error = 0;
326 1.22 perseant do_again++;
327 1.159 hannken } else
328 1.159 hannken KASSERT(error == ENOENT);
329 1.159 hannken KASSERT(vp == NULL);
330 1.22 perseant ip = NULL;
331 1.1 mycroft continue;
332 1.19 pk }
333 1.159 hannken
334 1.1 mycroft ip = VTOI(vp);
335 1.159 hannken numrefed++;
336 1.77 yamt ninowritten++;
337 1.159 hannken } else if (vp == NULL) {
338 1.22 perseant /*
339 1.22 perseant * This can only happen if the vnode is dead (or
340 1.22 perseant * in any case we can't get it...e.g., it is
341 1.22 perseant 			 * locked).  Keep going.
342 1.22 perseant */
343 1.1 mycroft continue;
344 1.22 perseant }
345 1.22 perseant
346 1.22 perseant /* Past this point we are guaranteed that vp, ip are valid. */
347 1.1 mycroft
348 1.124 ad /* Can't clean VU_DIROP directories in case of truncation */
349 1.116 perseant /* XXX - maybe we should mark removed dirs specially? */
350 1.124 ad if (vp->v_type == VDIR && (vp->v_uflag & VU_DIROP)) {
351 1.116 perseant do_again++;
352 1.116 perseant continue;
353 1.116 perseant }
354 1.116 perseant
355 1.1 mycroft /* If this BLOCK_INFO didn't contain a block, keep going. */
356 1.22 perseant if (blkp->bi_lbn == LFS_UNUSED_LBN) {
357 1.22 perseant /* XXX need to make sure that the inode gets written in this case */
358 1.22 perseant /* XXX but only write the inode if it's the right one */
359 1.53 perseant if (blkp->bi_inode != LFS_IFILE_INUM) {
360 1.53 perseant LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
361 1.126 ad if (ifp->if_daddr == blkp->bi_daddr) {
362 1.126 ad mutex_enter(&lfs_lock);
363 1.47 perseant LFS_SET_UINO(ip, IN_CLEANING);
364 1.126 ad mutex_exit(&lfs_lock);
365 1.126 ad }
366 1.123 ad brelse(bp, 0);
367 1.53 perseant }
368 1.1 mycroft continue;
369 1.22 perseant }
370 1.22 perseant
371 1.22 perseant b_daddr = 0;
372 1.112 perseant if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
373 1.147 christos LFS_DBTOFSB(fs, b_daddr) != blkp->bi_daddr)
374 1.112 perseant {
375 1.147 christos if (lfs_dtosn(fs, LFS_DBTOFSB(fs, b_daddr)) ==
376 1.147 christos lfs_dtosn(fs, blkp->bi_daddr))
377 1.22 perseant {
378 1.112 perseant DLOG((DLOG_CLEAN, "lfs_markv: wrong da same seg: %llx vs %llx\n",
379 1.147 christos (long long)blkp->bi_daddr, (long long)LFS_DBTOFSB(fs, b_daddr)));
380 1.22 perseant }
381 1.112 perseant do_again++;
382 1.112 perseant continue;
383 1.22 perseant }
384 1.69 perseant
385 1.69 perseant /*
386 1.69 perseant * Check block sizes. The blocks being cleaned come from
387 1.69 perseant * disk, so they should have the same size as their on-disk
388 1.69 perseant * counterparts.
389 1.69 perseant */
390 1.72 yamt if (blkp->bi_lbn >= 0)
391 1.147 christos obsize = lfs_blksize(fs, ip, blkp->bi_lbn);
392 1.72 yamt else
393 1.161 dholland obsize = lfs_sb_getbsize(fs);
394 1.69 perseant /* Check for fragment size change */
395 1.146 dholland if (blkp->bi_lbn >= 0 && blkp->bi_lbn < ULFS_NDADDR) {
396 1.69 perseant obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
397 1.69 perseant }
398 1.69 perseant if (obsize != blkp->bi_size) {
399 1.103 perseant DLOG((DLOG_CLEAN, "lfs_markv: ino %d lbn %lld wrong"
400 1.103 perseant " size (%ld != %d), try again\n",
401 1.103 perseant blkp->bi_inode, (long long)blkp->bi_lbn,
402 1.103 perseant (long) obsize, blkp->bi_size));
403 1.69 perseant do_again++;
404 1.69 perseant continue;
405 1.69 perseant }
406 1.69 perseant
407 1.22 perseant /*
408 1.69 perseant * If we get to here, then we are keeping the block. If
409 1.22 perseant * it is an indirect block, we want to actually put it
410 1.22 perseant * in the buffer cache so that it can be updated in the
411 1.82 perseant * finish_meta section. If it's not, we need to
412 1.22 perseant * allocate a fake buffer so that writeseg can perform
413 1.22 perseant * the copyin and write the buffer.
414 1.22 perseant */
415 1.38 perseant if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
416 1.38 perseant /* Data Block */
417 1.65 perseant bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
418 1.23 perseant blkp->bi_size, blkp->bi_bp);
419 1.23 perseant /* Pretend we used bread() to get it */
420 1.147 christos bp->b_blkno = LFS_FSBTODB(fs, blkp->bi_daddr);
421 1.38 perseant } else {
422 1.75 yamt /* Indirect block or ifile */
423 1.161 dholland if (blkp->bi_size != lfs_sb_getbsize(fs) &&
424 1.75 yamt ip->i_number != LFS_IFILE_INUM)
425 1.72 yamt panic("lfs_markv: partial indirect block?"
426 1.72 yamt " size=%d\n", blkp->bi_size);
427 1.22 perseant bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
428 1.126 ad if (!(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
429 1.22 perseant /*
430 1.22 perseant * The block in question was not found
431 1.22 perseant * in the cache; i.e., the block that
432 1.82 perseant * getblk() returned is empty. So, we
433 1.22 perseant * can (and should) copy in the
434 1.22 perseant * contents, because we've already
435 1.22 perseant * determined that this was the right
436 1.22 perseant * version of this block on disk.
437 1.22 perseant *
438 1.22 perseant * And, it can't have changed underneath
439 1.22 perseant * us, because we have the segment lock.
440 1.22 perseant */
441 1.22 perseant error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
442 1.62 chs if (error)
443 1.22 perseant goto err2;
444 1.22 perseant }
445 1.22 perseant }
446 1.96 yamt if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
447 1.22 perseant goto err2;
448 1.77 yamt
449 1.77 yamt nblkwritten++;
450 1.77 yamt /*
451 1.77 yamt * XXX should account indirect blocks and ifile pages as well
452 1.77 yamt */
453 1.147 christos if (nblkwritten + lfs_lblkno(fs, ninowritten * sizeof (struct ulfs1_dinode))
454 1.77 yamt > LFS_MARKV_MAX_BLOCKS) {
455 1.103 perseant DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos\n",
456 1.103 perseant nblkwritten, ninowritten));
457 1.77 yamt lfs_segwrite(mntp, SEGM_CLEAN);
458 1.77 yamt nblkwritten = ninowritten = 0;
459 1.77 yamt }
460 1.22 perseant }
461 1.102 perry
462 1.22 perseant /*
463 1.22 perseant * Finish the old file, if there was one
464 1.22 perseant */
465 1.159 hannken if (vp != NULL) {
466 1.160 hannken vput(vp);
467 1.159 hannken vp = NULL;
468 1.22 perseant numrefed--;
469 1.22 perseant }
470 1.102 perry
471 1.103 perseant #ifdef DIAGNOSTIC
472 1.103 perseant if (numrefed != 0)
473 1.74 yamt panic("lfs_markv: numrefed=%d", numrefed);
474 1.74 yamt #endif
475 1.103 perseant DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos (check point)\n",
476 1.103 perseant nblkwritten, ninowritten));
477 1.102 perry
478 1.22 perseant /*
479 1.22 perseant * The last write has to be SEGM_SYNC, because of calling semantics.
480 1.22 perseant * It also has to be SEGM_CKP, because otherwise we could write
481 1.22 perseant * over the newly cleaned data contained in a checkpoint, and then
482 1.22 perseant * we'd be unhappy at recovery time.
483 1.22 perseant */
484 1.67 perseant lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);
485 1.102 perry
486 1.1 mycroft lfs_segunlock(fs);
487 1.1 mycroft
488 1.131 ad vfs_unbusy(mntp, false, NULL);
489 1.62 chs if (error)
490 1.22 perseant return (error);
491 1.62 chs else if (do_again)
492 1.22 perseant return EAGAIN;
493 1.1 mycroft
494 1.22 perseant return 0;
495 1.102 perry
496 1.96 yamt err2:
497 1.103 perseant DLOG((DLOG_CLEAN, "lfs_markv err2\n"));
498 1.53 perseant
499 1.96 yamt /*
500 1.96 yamt * XXX we're here because copyin() failed.
501 1.96 yamt * XXX it means that we can't trust the cleanerd. too bad.
502 1.96 yamt * XXX how can we recover from this?
503 1.96 yamt */
504 1.96 yamt
505 1.96 yamt err3:
506 1.96 yamt /*
507 1.96 yamt * XXX should do segwrite here anyway?
508 1.96 yamt */
509 1.96 yamt
510 1.159 hannken if (vp != NULL) {
511 1.160 hannken vput(vp);
512 1.159 hannken vp = NULL;
513 1.96 yamt --numrefed;
514 1.22 perseant }
515 1.96 yamt
516 1.1 mycroft lfs_segunlock(fs);
517 1.131 ad vfs_unbusy(mntp, false, NULL);
518 1.103 perseant #ifdef DIAGNOSTIC
519 1.103 perseant if (numrefed != 0)
520 1.74 yamt panic("lfs_markv: numrefed=%d", numrefed);
521 1.53 perseant #endif
522 1.53 perseant
523 1.22 perseant return (error);
524 1.1 mycroft }
525 1.1 mycroft
526 1.1 mycroft /*
527 1.31 christos * sys_lfs_bmapv:
528 1.1 mycroft *
529 1.1 mycroft * This will fill in the current disk address for arrays of blocks.
530 1.1 mycroft *
531 1.1 mycroft * 0 on success
532 1.1 mycroft  * -1/errno is returned on error.
533 1.1 mycroft */
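/*
 * Illustrative only: a sketch of how a cleaner typically uses bmapv before
 * markv.  bi_daddr is loaded with the address at which each block was found
 * in the segment being cleaned; after the call, a block is still live iff
 * the kernel reports the same address.  Everything except the bi_* fields
 * is a made-up name for the example.
 *
 *	if (lfs_bmapv(&fsid, bi, nblks) == 0) {
 *		for (i = 0; i < nblks; i++) {
 *			if (bi[i].bi_daddr == found_daddr[i])
 *				keep_for_markv(&bi[i]);	// still live
 *		}
 *	}
 */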
534 1.57 perseant #ifdef USE_64BIT_SYSCALLS
535 1.57 perseant int
536 1.125 dsl sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
537 1.57 perseant {
538 1.125 dsl /* {
539 1.57 perseant syscallarg(fsid_t *) fsidp;
540 1.57 perseant syscallarg(struct block_info *) blkiov;
541 1.57 perseant syscallarg(int) blkcnt;
542 1.125 dsl } */
543 1.57 perseant BLOCK_INFO *blkiov;
544 1.57 perseant int blkcnt, error;
545 1.57 perseant fsid_t fsid;
546 1.105 perseant struct lfs *fs;
547 1.105 perseant struct mount *mntp;
548 1.22 perseant
549 1.142 elad error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
550 1.142 elad KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
551 1.142 elad if (error)
552 1.57 perseant return (error);
553 1.102 perry
554 1.57 perseant if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
555 1.57 perseant return (error);
556 1.57 perseant
557 1.105 perseant if ((mntp = vfs_getvfs(&fsid)) == NULL)
558 1.105 perseant return (ENOENT);
559 1.146 dholland fs = VFSTOULFS(mntp)->um_lfs;
560 1.105 perseant
561 1.57 perseant blkcnt = SCARG(uap, blkcnt);
562 1.71 itojun if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
563 1.71 itojun return (EINVAL);
564 1.129 ad KERNEL_LOCK(1, NULL);
565 1.105 perseant blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
566 1.57 perseant if ((error = copyin(SCARG(uap, blkiov), blkiov,
567 1.57 perseant blkcnt * sizeof(BLOCK_INFO))) != 0)
568 1.57 perseant goto out;
569 1.57 perseant
570 1.57 perseant 	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0)
571 1.57 perseant copyout(blkiov, SCARG(uap, blkiov),
572 1.57 perseant blkcnt * sizeof(BLOCK_INFO));
573 1.57 perseant out:
574 1.105 perseant lfs_free(fs, blkiov, LFS_NB_BLKIOV);
575 1.129 ad KERNEL_UNLOCK_ONE(NULL);
576 1.57 perseant return error;
577 1.57 perseant }
578 1.57 perseant #else
579 1.1 mycroft int
580 1.125 dsl sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
581 1.9 thorpej {
582 1.125 dsl /* {
583 1.32 drochner syscallarg(fsid_t *) fsidp;
584 1.32 drochner syscallarg(struct block_info *) blkiov;
585 1.32 drochner syscallarg(int) blkcnt;
586 1.125 dsl } */
587 1.57 perseant BLOCK_INFO *blkiov;
588 1.57 perseant BLOCK_INFO_15 *blkiov15;
589 1.57 perseant int i, blkcnt, error;
590 1.57 perseant fsid_t fsid;
591 1.105 perseant struct lfs *fs;
592 1.105 perseant struct mount *mntp;
593 1.57 perseant
594 1.142 elad error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
595 1.142 elad KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
596 1.142 elad if (error)
597 1.57 perseant return (error);
598 1.102 perry
599 1.57 perseant if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
600 1.57 perseant return (error);
601 1.57 perseant
602 1.105 perseant if ((mntp = vfs_getvfs(&fsid)) == NULL)
603 1.105 perseant return (ENOENT);
604 1.146 dholland fs = VFSTOULFS(mntp)->um_lfs;
605 1.105 perseant
606 1.57 perseant blkcnt = SCARG(uap, blkcnt);
607 1.90 nakayama if ((size_t) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
608 1.71 itojun return (EINVAL);
609 1.129 ad KERNEL_LOCK(1, NULL);
610 1.105 perseant blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
611 1.105 perseant blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
612 1.57 perseant if ((error = copyin(SCARG(uap, blkiov), blkiov15,
613 1.57 perseant blkcnt * sizeof(BLOCK_INFO_15))) != 0)
614 1.57 perseant goto out;
615 1.57 perseant
616 1.57 perseant for (i = 0; i < blkcnt; i++) {
617 1.57 perseant blkiov[i].bi_inode = blkiov15[i].bi_inode;
618 1.57 perseant blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
619 1.57 perseant blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
620 1.57 perseant blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
621 1.57 perseant blkiov[i].bi_version = blkiov15[i].bi_version;
622 1.82 perseant blkiov[i].bi_bp = blkiov15[i].bi_bp;
623 1.57 perseant blkiov[i].bi_size = blkiov15[i].bi_size;
624 1.57 perseant }
625 1.57 perseant
626 1.115 ad if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
627 1.57 perseant for (i = 0; i < blkcnt; i++) {
628 1.82 perseant blkiov15[i].bi_inode = blkiov[i].bi_inode;
629 1.82 perseant blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
630 1.82 perseant blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
631 1.57 perseant blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
632 1.82 perseant blkiov15[i].bi_version = blkiov[i].bi_version;
633 1.82 perseant blkiov15[i].bi_bp = blkiov[i].bi_bp;
634 1.82 perseant blkiov15[i].bi_size = blkiov[i].bi_size;
635 1.57 perseant }
636 1.57 perseant copyout(blkiov15, SCARG(uap, blkiov),
637 1.57 perseant blkcnt * sizeof(BLOCK_INFO_15));
638 1.57 perseant }
639 1.57 perseant out:
640 1.105 perseant lfs_free(fs, blkiov, LFS_NB_BLKIOV);
641 1.105 perseant lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
642 1.129 ad KERNEL_UNLOCK_ONE(NULL);
643 1.57 perseant return error;
644 1.57 perseant }
645 1.57 perseant #endif
646 1.57 perseant
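/*
 * Guts of the bmapv operation: for each BLOCK_INFO, fill in the block's
 * current disk address (and size), or LFS_UNUSED_DADDR if the inode or
 * block no longer exists, so the cleaner can tell what is still live.
 */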
647 1.84 perseant int
648 1.93 fvdl lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
649 1.57 perseant {
650 1.1 mycroft BLOCK_INFO *blkp;
651 1.22 perseant IFILE *ifp;
652 1.22 perseant struct buf *bp;
653 1.22 perseant struct inode *ip = NULL;
654 1.22 perseant struct lfs *fs;
655 1.1 mycroft struct mount *mntp;
656 1.160 hannken struct ulfsmount *ump;
657 1.1 mycroft struct vnode *vp;
658 1.22 perseant ino_t lastino;
659 1.79 fvdl daddr_t v_daddr;
660 1.74 yamt int cnt, error;
661 1.74 yamt int numrefed = 0;
662 1.1 mycroft
663 1.57 perseant if ((mntp = vfs_getvfs(fsidp)) == NULL)
664 1.53 perseant return (ENOENT);
665 1.102 perry
666 1.160 hannken ump = VFSTOULFS(mntp);
667 1.132 ad if ((error = vfs_busy(mntp, NULL)) != 0)
668 1.53 perseant return (error);
669 1.102 perry
670 1.160 hannken if (ump->um_cleaner_thread == NULL)
671 1.160 hannken ump->um_cleaner_thread = curlwp;
672 1.160 hannken KASSERT(ump->um_cleaner_thread == curlwp);
673 1.160 hannken
674 1.57 perseant cnt = blkcnt;
675 1.102 perry
676 1.146 dholland fs = VFSTOULFS(mntp)->um_lfs;
677 1.102 perry
678 1.22 perseant error = 0;
679 1.102 perry
680 1.22 perseant /* these were inside the initialization for the for loop */
681 1.159 hannken vp = NULL;
682 1.22 perseant v_daddr = LFS_UNUSED_DADDR;
683 1.22 perseant lastino = LFS_UNUSED_INUM;
684 1.57 perseant for (blkp = blkiov; cnt--; ++blkp)
685 1.22 perseant {
686 1.16 fvdl /*
687 1.22 perseant * Get the IFILE entry (only once) and see if the file still
688 1.22 perseant * exists.
689 1.16 fvdl */
690 1.22 perseant if (lastino != blkp->bi_inode) {
691 1.22 perseant /*
692 1.159 hannken * Finish the old file, if there was one.
693 1.22 perseant */
694 1.159 hannken if (vp != NULL) {
695 1.160 hannken vput(vp);
696 1.159 hannken vp = NULL;
697 1.22 perseant numrefed--;
698 1.22 perseant }
699 1.22 perseant
700 1.22 perseant /*
701 1.22 perseant * Start a new file
702 1.22 perseant */
703 1.22 perseant lastino = blkp->bi_inode;
704 1.22 perseant if (blkp->bi_inode == LFS_IFILE_INUM)
705 1.161 dholland v_daddr = lfs_sb_getidaddr(fs);
706 1.22 perseant else {
707 1.22 perseant LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
708 1.22 perseant v_daddr = ifp->if_daddr;
709 1.123 ad brelse(bp, 0);
710 1.22 perseant }
711 1.22 perseant if (v_daddr == LFS_UNUSED_DADDR) {
712 1.22 perseant blkp->bi_daddr = LFS_UNUSED_DADDR;
713 1.22 perseant continue;
714 1.22 perseant }
715 1.159 hannken error = lfs_fastvget(mntp, blkp->bi_inode, NULL,
716 1.159 hannken LK_SHARED, &vp);
717 1.159 hannken if (error) {
718 1.159 hannken DLOG((DLOG_CLEAN, "lfs_bmapv: lfs_fastvget ino"
719 1.159 hannken 				    " %d failed with %d\n",
720 1.159 hannken 				    blkp->bi_inode, error));
721 1.159 hannken KASSERT(vp == NULL);
722 1.159 hannken continue;
723 1.159 hannken } else {
724 1.159 hannken KASSERT(VOP_ISLOCKED(vp));
725 1.43 perseant numrefed++;
726 1.22 perseant }
727 1.22 perseant ip = VTOI(vp);
728 1.159 hannken } else if (vp == NULL) {
729 1.22 perseant /*
730 1.22 perseant * This can only happen if the vnode is dead.
731 1.82 perseant * Keep going. Note that we DO NOT set the
732 1.22 perseant * bi_addr to anything -- if we failed to get
733 1.22 perseant * the vnode, for example, we want to assume
734 1.22 perseant * conservatively that all of its blocks *are*
735 1.22 perseant * located in the segment in question.
736 1.22 perseant * lfs_markv will throw them out if we are
737 1.22 perseant * wrong.
738 1.22 perseant */
739 1.22 perseant continue;
740 1.22 perseant }
741 1.22 perseant
742 1.22 perseant /* Past this point we are guaranteed that vp, ip are valid. */
743 1.22 perseant
744 1.62 chs if (blkp->bi_lbn == LFS_UNUSED_LBN) {
745 1.22 perseant /*
746 1.22 perseant * We just want the inode address, which is
747 1.22 perseant * conveniently in v_daddr.
748 1.22 perseant */
749 1.22 perseant blkp->bi_daddr = v_daddr;
750 1.22 perseant } else {
751 1.79 fvdl daddr_t bi_daddr;
752 1.79 fvdl
753 1.79 fvdl /* XXX ondisk32 */
754 1.22 perseant error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
755 1.79 fvdl &bi_daddr, NULL);
756 1.62 chs if (error)
757 1.22 perseant {
758 1.22 perseant blkp->bi_daddr = LFS_UNUSED_DADDR;
759 1.22 perseant continue;
760 1.22 perseant }
761 1.147 christos blkp->bi_daddr = LFS_DBTOFSB(fs, bi_daddr);
762 1.66 perseant /* Fill in the block size, too */
763 1.72 yamt if (blkp->bi_lbn >= 0)
764 1.147 christos blkp->bi_size = lfs_blksize(fs, ip, blkp->bi_lbn);
765 1.72 yamt else
766 1.161 dholland blkp->bi_size = lfs_sb_getbsize(fs);
767 1.22 perseant }
768 1.22 perseant }
769 1.102 perry
770 1.22 perseant /*
771 1.159 hannken * Finish the old file, if there was one.
772 1.22 perseant */
773 1.159 hannken if (vp != NULL) {
774 1.160 hannken vput(vp);
775 1.159 hannken vp = NULL;
776 1.22 perseant numrefed--;
777 1.22 perseant }
778 1.102 perry
779 1.103 perseant #ifdef DIAGNOSTIC
780 1.103 perseant if (numrefed != 0)
781 1.74 yamt panic("lfs_bmapv: numrefed=%d", numrefed);
782 1.74 yamt #endif
783 1.102 perry
784 1.131 ad vfs_unbusy(mntp, false, NULL);
785 1.102 perry
786 1.22 perseant return 0;
787 1.1 mycroft }
788 1.1 mycroft
789 1.1 mycroft /*
790 1.31 christos * sys_lfs_segclean:
791 1.1 mycroft *
792 1.1 mycroft * Mark the segment clean.
793 1.1 mycroft *
794 1.1 mycroft * 0 on success
795 1.1 mycroft  * -1/errno is returned on error.
796 1.1 mycroft */
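/*
 * Illustrative only: once lfs_markv has rewritten all live blocks of a
 * segment, the cleaner marks that segment clean.  The userland prototype
 * and "segnum" are assumptions for the example.
 *
 *	if (lfs_segclean(&fsid, segnum) < 0 && errno == EBUSY) {
 *		// segment is active or not yet empty; retry on a later pass
 *	}
 */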
797 1.1 mycroft int
798 1.125 dsl sys_lfs_segclean(struct lwp *l, const struct sys_lfs_segclean_args *uap, register_t *retval)
799 1.9 thorpej {
800 1.125 dsl /* {
801 1.32 drochner syscallarg(fsid_t *) fsidp;
802 1.32 drochner syscallarg(u_long) segment;
803 1.125 dsl } */
804 1.80 perseant struct lfs *fs;
805 1.1 mycroft struct mount *mntp;
806 1.1 mycroft fsid_t fsid;
807 1.1 mycroft int error;
808 1.67 perseant unsigned long segnum;
809 1.102 perry
810 1.142 elad error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
811 1.142 elad KAUTH_REQ_SYSTEM_LFS_SEGCLEAN, NULL, NULL, NULL);
812 1.142 elad if (error)
813 1.1 mycroft return (error);
814 1.102 perry
815 1.10 christos if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
816 1.1 mycroft return (error);
817 1.16 fvdl if ((mntp = vfs_getvfs(&fsid)) == NULL)
818 1.53 perseant return (ENOENT);
819 1.102 perry
820 1.146 dholland fs = VFSTOULFS(mntp)->um_lfs;
821 1.67 perseant segnum = SCARG(uap, segment);
822 1.102 perry
823 1.132 ad if ((error = vfs_busy(mntp, NULL)) != 0)
824 1.53 perseant return (error);
825 1.80 perseant
826 1.129 ad KERNEL_LOCK(1, NULL);
827 1.65 perseant lfs_seglock(fs, SEGM_PROT);
828 1.80 perseant error = lfs_do_segclean(fs, segnum);
829 1.80 perseant lfs_segunlock(fs);
830 1.129 ad KERNEL_UNLOCK_ONE(NULL);
831 1.131 ad vfs_unbusy(mntp, false, NULL);
832 1.80 perseant return error;
833 1.80 perseant }
834 1.80 perseant
835 1.80 perseant /*
836 1.80 perseant * Actually mark the segment clean.
837 1.80 perseant * Must be called with the segment lock held.
838 1.80 perseant */
839 1.80 perseant int
840 1.80 perseant lfs_do_segclean(struct lfs *fs, unsigned long segnum)
841 1.80 perseant {
842 1.107 perseant extern int lfs_dostats;
843 1.80 perseant struct buf *bp;
844 1.80 perseant CLEANERINFO *cip;
845 1.80 perseant SEGUSE *sup;
846 1.102 perry
847 1.161 dholland if (lfs_dtosn(fs, lfs_sb_getcurseg(fs)) == segnum) {
848 1.80 perseant return (EBUSY);
849 1.80 perseant }
850 1.102 perry
851 1.67 perseant LFS_SEGENTRY(sup, fs, segnum, bp);
852 1.67 perseant if (sup->su_nbytes) {
853 1.103 perseant DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
854 1.103 perseant " %d live bytes\n", segnum, sup->su_nbytes));
855 1.123 ad brelse(bp, 0);
856 1.67 perseant return (EBUSY);
857 1.67 perseant }
858 1.1 mycroft if (sup->su_flags & SEGUSE_ACTIVE) {
859 1.106 perseant DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
860 1.106 perseant " segment is active\n", segnum));
861 1.123 ad brelse(bp, 0);
862 1.1 mycroft return (EBUSY);
863 1.50 perseant }
864 1.50 perseant if (!(sup->su_flags & SEGUSE_DIRTY)) {
865 1.106 perseant DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
866 1.106 perseant " segment is already clean\n", segnum));
867 1.123 ad brelse(bp, 0);
868 1.50 perseant return (EALREADY);
869 1.1 mycroft }
870 1.102 perry
871 1.161 dholland lfs_sb_addavail(fs, lfs_segtod(fs, 1));
872 1.46 perseant if (sup->su_flags & SEGUSE_SUPERBLOCK)
873 1.161 dholland lfs_sb_subavail(fs, lfs_btofsb(fs, LFS_SBPAD));
874 1.67 perseant if (fs->lfs_version > 1 && segnum == 0 &&
875 1.158 hannken fs->lfs_s0addr < lfs_btofsb(fs, LFS_LABELPAD))
876 1.161 dholland lfs_sb_subavail(fs, lfs_btofsb(fs, LFS_LABELPAD) - fs->lfs_s0addr);
877 1.126 ad mutex_enter(&lfs_lock);
878 1.161 dholland lfs_sb_addbfree(fs, sup->su_nsums * lfs_btofsb(fs, fs->lfs_sumsize) +
879 1.161 dholland lfs_btofsb(fs, sup->su_ninos * lfs_sb_getibsize(fs)));
880 1.161 dholland lfs_sb_subdmeta(fs, sup->su_nsums * lfs_btofsb(fs, fs->lfs_sumsize) +
881 1.161 dholland lfs_btofsb(fs, sup->su_ninos * lfs_sb_getibsize(fs)));
882 1.161 dholland if (lfs_sb_getdmeta(fs) < 0)
883 1.161 dholland lfs_sb_setdmeta(fs, 0);
884 1.126 ad mutex_exit(&lfs_lock);
885 1.1 mycroft sup->su_flags &= ~SEGUSE_DIRTY;
886 1.80 perseant LFS_WRITESEGENTRY(sup, fs, segnum, bp);
887 1.102 perry
888 1.1 mycroft LFS_CLEANERINFO(cip, fs, bp);
889 1.1 mycroft ++cip->clean;
890 1.1 mycroft --cip->dirty;
891 1.22 perseant fs->lfs_nclean = cip->clean;
892 1.126 ad mutex_enter(&lfs_lock);
893 1.161 dholland cip->bfree = lfs_sb_getbfree(fs);
894 1.161 dholland cip->avail = lfs_sb_getavail(fs) - fs->lfs_ravail - fs->lfs_favail;
895 1.161 dholland wakeup(&fs->lfs_availsleep);
896 1.126 ad mutex_exit(&lfs_lock);
897 1.65 perseant (void) LFS_BWRITE_LOG(bp);
898 1.22 perseant
899 1.107 perseant if (lfs_dostats)
900 1.107 perseant ++lfs_stats.segs_reclaimed;
901 1.106 perseant
902 1.1 mycroft return (0);
903 1.1 mycroft }
904 1.1 mycroft
905 1.1 mycroft /*
906 1.1 mycroft  * This will block until a segment in file system fsid is written.  A timeout
907 1.1 mycroft  * (a struct timeval) may be specified, which will wake the cleaner automatically.
908 1.1 mycroft * An fsid of -1 means any file system, and a timeout of 0 means forever.
909 1.84 perseant */
910 1.84 perseant int
911 1.84 perseant lfs_segwait(fsid_t *fsidp, struct timeval *tv)
912 1.84 perseant {
913 1.84 perseant struct mount *mntp;
914 1.84 perseant void *addr;
915 1.84 perseant u_long timeout;
916 1.114 kardel int error;
917 1.84 perseant
918 1.129 ad KERNEL_LOCK(1, NULL);
919 1.106 perseant if (fsidp == NULL || (mntp = vfs_getvfs(fsidp)) == NULL)
920 1.84 perseant addr = &lfs_allclean_wakeup;
921 1.84 perseant else
922 1.161 dholland addr = &VFSTOULFS(mntp)->um_lfs->lfs_nextsegsleep;
923 1.84 perseant /*
924 1.84 perseant * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
925 1.84 perseant * XXX IS THAT WHAT IS INTENDED?
926 1.84 perseant */
927 1.114 kardel timeout = tvtohz(tv);
928 1.111 perseant error = tsleep(addr, PCATCH | PVFS, "segment", timeout);
929 1.129 ad KERNEL_UNLOCK_ONE(NULL);
930 1.84 perseant return (error == ERESTART ? EINTR : 0);
931 1.84 perseant }
932 1.84 perseant
933 1.84 perseant /*
934 1.84 perseant * sys_lfs_segwait:
935 1.84 perseant *
936 1.84 perseant * System call wrapper around lfs_segwait().
937 1.1 mycroft *
938 1.1 mycroft * 0 on success
939 1.1 mycroft * 1 on timeout
940 1.1 mycroft  * -1/errno is returned on error.
941 1.1 mycroft */
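/*
 * Illustrative only: a cleaner with nothing left to do can block until the
 * next segment write.  The userland entry point name is an assumption here;
 * note the XXX above about a zero timeout sleeping forever.
 *
 *	struct timeval tv = { .tv_sec = 300, .tv_usec = 0 };
 *
 *	lfs_segwait(&fsid, &tv);	// wakes on a segment write or after ~5 min
 */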
942 1.1 mycroft int
943 1.134 christos sys___lfs_segwait50(struct lwp *l, const struct sys___lfs_segwait50_args *uap,
944 1.134 christos register_t *retval)
945 1.9 thorpej {
946 1.125 dsl /* {
947 1.32 drochner syscallarg(fsid_t *) fsidp;
948 1.32 drochner syscallarg(struct timeval *) tv;
949 1.125 dsl } */
950 1.1 mycroft struct timeval atv;
951 1.1 mycroft fsid_t fsid;
952 1.84 perseant int error;
953 1.102 perry
954 1.84 perseant /* XXX need we be su to segwait? */
955 1.142 elad error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
956 1.142 elad KAUTH_REQ_SYSTEM_LFS_SEGWAIT, NULL, NULL, NULL);
957 1.142 elad if (error)
958 1.1 mycroft return (error);
959 1.10 christos if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
960 1.1 mycroft return (error);
961 1.102 perry
962 1.5 cgd if (SCARG(uap, tv)) {
963 1.10 christos error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
964 1.10 christos if (error)
965 1.1 mycroft return (error);
966 1.1 mycroft if (itimerfix(&atv))
967 1.1 mycroft return (EINVAL);
968 1.84 perseant } else /* NULL or invalid */
969 1.84 perseant atv.tv_sec = atv.tv_usec = 0;
970 1.84 perseant return lfs_segwait(&fsid, &atv);
971 1.1 mycroft }
972 1.1 mycroft
973 1.1 mycroft /*
974 1.160 hannken * VFS_VGET call specialized for the cleaner. If the cleaner is
975 1.1 mycroft * processing IINFO structures, it may have the ondisk inode already, so
976 1.1 mycroft * don't go retrieving it again.
977 1.22 perseant *
978 1.160 hannken * Return the vnode referenced and locked.
979 1.1 mycroft */
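/*
 * Caller pattern, for reference (this is what lfs_markv/lfs_bmapv above do):
 * on success the vnode comes back referenced and locked, so the caller owes
 * a vput(); on failure nothing is owed.
 *
 *	error = lfs_fastvget(mntp, ino, blkp, LK_EXCLUSIVE | LK_NOWAIT, &vp);
 *	if (error == 0) {
 *		ip = VTOI(vp);
 *		...
 *		vput(vp);
 *	}
 */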
980 1.22 perseant
981 1.160 hannken static int
982 1.159 hannken lfs_fastvget(struct mount *mp, ino_t ino, BLOCK_INFO *blkp, int lk_flags,
983 1.159 hannken struct vnode **vpp)
984 1.1 mycroft {
985 1.146 dholland struct ulfsmount *ump;
986 1.160 hannken int error;
987 1.102 perry
988 1.146 dholland ump = VFSTOULFS(mp);
989 1.160 hannken ump->um_cleaner_hint = blkp;
990 1.160 hannken error = vcache_get(mp, &ino, sizeof(ino), vpp);
991 1.160 hannken ump->um_cleaner_hint = NULL;
992 1.160 hannken if (error)
993 1.159 hannken return error;
994 1.160 hannken error = vn_lock(*vpp, lk_flags);
995 1.160 hannken if (error) {
996 1.159 hannken if (error == EBUSY)
997 1.159 hannken error = EAGAIN;
998 1.160 hannken vrele(*vpp);
999 1.101 perseant *vpp = NULL;
1000 1.160 hannken return error;
1001 1.44 fvdl }
1002 1.44 fvdl
1003 1.160 hannken return 0;
1004 1.1 mycroft }
1005 1.22 perseant
1006 1.85 perseant /*
1007 1.85 perseant  * Make up a "fake" cleaner buffer and copy the data from userland into it.
1008 1.85 perseant */
1009 1.1 mycroft struct buf *
1010 1.122 christos lfs_fakebuf(struct lfs *fs, struct vnode *vp, int lbn, size_t size, void *uaddr)
1011 1.1 mycroft {
1012 1.1 mycroft struct buf *bp;
1013 1.25 perseant int error;
1014 1.75 yamt
1015 1.75 yamt KASSERT(VTOI(vp)->i_number != LFS_IFILE_INUM);
1016 1.73 yamt
1017 1.80 perseant bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size, LFS_NB_CLEAN);
1018 1.25 perseant error = copyin(uaddr, bp->b_data, size);
1019 1.62 chs if (error) {
1020 1.80 perseant lfs_freebuf(fs, bp);
1021 1.25 perseant return NULL;
1022 1.22 perseant }
1023 1.73 yamt KDASSERT(bp->b_iodone == lfs_callback);
1024 1.73 yamt
1025 1.65 perseant #if 0
1026 1.126 ad mutex_enter(&lfs_lock);
1027 1.65 perseant ++fs->lfs_iocount;
1028 1.126 ad mutex_exit(&lfs_lock);
1029 1.65 perseant #endif
1030 1.1 mycroft bp->b_bufsize = size;
1031 1.1 mycroft bp->b_bcount = size;
1032 1.1 mycroft return (bp);
1033 1.1 mycroft }