/*	$NetBSD: lfs_syscalls.c,v 1.164 2015/08/02 18:14:16 dholland Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007, 2008
 *	The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.164 2015/08/02 18:14:16 dholland Exp $");

#ifndef LFS
# define LFS		/* for prototypes in syscallargs.h */
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/syscallargs.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_kernel.h>
#include <ufs/lfs/lfs_extern.h>

static int lfs_fastvget(struct mount *, ino_t, BLOCK_INFO *, int,
    struct vnode **);
struct buf *lfs_fakebuf(struct lfs *, struct vnode *, int, size_t, void *);

/*
 * sys_lfs_markv:
 *
 * This will mark inodes and blocks dirty, so they are written into the log.
 * It will block until all the blocks have been written.  The segment create
 * time passed in the block_info and inode_info structures is used to decide
 * if the data is valid for each block (in case some process dirtied a block
 * or inode that is being cleaned between the determination that a block is
 * live and the lfs_markv call).
 *
 *  0 on success
 * -1/errno is returned on error.
 */
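/*
 * Illustrative sketch (not part of the kernel): the userland cleaner,
 * lfs_cleanerd, is the expected caller of this syscall.  Very roughly, it
 * fills in one BLOCK_INFO per block it believes is still live in a segment
 * and hands the whole array in at once; the names and error handling below
 * are simplified assumptions, not the actual cleaner code:
 *
 *	BLOCK_INFO bi[MAXBLOCKS];
 *	for (i = 0; i < nlive; i++) {
 *		bi[i].bi_inode = ino;
 *		bi[i].bi_lbn = lbn;
 *		bi[i].bi_daddr = found_daddr;
 *		bi[i].bi_segcreate = seg_create_time;
 *		bi[i].bi_version = version;
 *		bi[i].bi_bp = block_contents;
 *		bi[i].bi_size = block_size;
 *	}
 *	if (lfs_markv(&fsid, bi, nlive) < 0 && errno == EAGAIN)
 *		... leave this segment for a later pass ...
 */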
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_markv(l->l_proc, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
 out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#else
int
sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_markv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
 out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#endif

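/*
 * Limit on how many blocks lfs_markv() will dirty before it forces an
 * intermediate segment write; see the nblkwritten check in the main loop
 * below.
 */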
#define	LFS_MARKV_MAX_BLOCKS	(LFS_MAX_BUFS)

int
lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov,
    int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ulfsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	daddr_t b_daddr;
	int cnt, error;
	int do_again = 0;
	int numrefed = 0;
	ino_t maxino;
	size_t obsize;

	/* number of blocks/inodes that we have already bwrite'ed */
	int nblkwritten, ninowritten;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOULFS(mntp);
	fs = ump->um_lfs;

	if (fs->lfs_ronly)
		return EROFS;

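	/*
	 * Compute the largest inode number that can currently have an
	 * ifile entry: the number of ifile blocks that actually hold
	 * inode entries (total ifile blocks less the cleaner-info and
	 * segment-table blocks) times the entries per block.  Used only
	 * to bounds-check bi_inode below.
	 */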
	maxino = (lfs_fragstoblks(fs, VTOI(fs->lfs_ivnode)->i_ffs1_blocks) -
		      lfs_sb_getcleansz(fs) - lfs_sb_getsegtabsz(fs)) * lfs_sb_getifpb(fs);

	cnt = blkcnt;

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	/*
	 * Take the seglock here so that the blocks we were handed cannot
	 * become invalid while we may have to sleep.
	 *
	 * It is also important to note that unless we specify SEGM_CKP,
	 * any Ifile blocks that we might be asked to clean will never get
	 * to the disk.
	 */
	lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	/* Mark blocks/inodes dirty.  */
	error = 0;

	/* these were inside the initialization for the for loop */
	vp = NULL;
	lastino = LFS_UNUSED_INUM;
	nblkwritten = ninowritten = 0;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/* Bounds-check incoming data, avoid panic for failed VGET */
		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
			error = EINVAL;
			goto err3;
		}
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.
			 */
			if (vp != NULL) {
				vput(vp);
				vp = NULL;
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;

			/* Get the vnode/inode. */
			error = lfs_fastvget(mntp, blkp->bi_inode, blkp,
			    LK_EXCLUSIVE | LK_NOWAIT, &vp);
			if (error) {
				DLOG((DLOG_CLEAN, "lfs_markv: lfs_fastvget"
				      " failed with %d (ino %d, segment %d)\n",
				      error, blkp->bi_inode,
				      lfs_dtosn(fs, blkp->bi_daddr)));
				/*
				 * If we got EAGAIN, that means that the
				 * Inode was locked.  This is
				 * recoverable: just clean the rest of
				 * this segment, and let the cleaner try
				 * again with another.  (When the
				 * cleaner runs again, this segment will
				 * sort high on the list, since it is
				 * now almost entirely empty.)
				 */
				if (error == EAGAIN) {
					error = 0;
					do_again++;
				} else
					KASSERT(error == ENOENT);
				KASSERT(vp == NULL);
				ip = NULL;
				continue;
			}

			ip = VTOI(vp);
			numrefed++;
			ninowritten++;
		} else if (vp == NULL) {
			/*
			 * This can only happen if the vnode is dead (or,
			 * in any case, we could not get it, e.g. it is
			 * locked by someone else).  Keep going.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		/* Can't clean VU_DIROP directories in case of truncation */
		/* XXX - maybe we should mark removed dirs specially? */
		if (vp->v_type == VDIR && (vp->v_uflag & VU_DIROP)) {
			do_again++;
			continue;
		}

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/* XXX need to make sure that the inode gets written in this case */
			/* XXX but only write the inode if it's the right one */
			if (blkp->bi_inode != LFS_IFILE_INUM) {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				if (ifp->if_daddr == blkp->bi_daddr) {
					mutex_enter(&lfs_lock);
					LFS_SET_UINO(ip, IN_CLEANING);
					mutex_exit(&lfs_lock);
				}
				brelse(bp, 0);
			}
			continue;
		}

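		/*
		 * Make sure the block still lives where the cleaner found
		 * it.  If VOP_BMAP() now maps the logical block somewhere
		 * else, the file was modified (or the block rewritten)
		 * after the cleaner sampled it, so this copy is stale and
		 * we skip it.
		 */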
		b_daddr = 0;
		if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
		    LFS_DBTOFSB(fs, b_daddr) != blkp->bi_daddr)
		{
			if (lfs_dtosn(fs, LFS_DBTOFSB(fs, b_daddr)) ==
			    lfs_dtosn(fs, blkp->bi_daddr))
			{
				DLOG((DLOG_CLEAN, "lfs_markv: wrong da same seg: %llx vs %llx\n",
				      (long long)blkp->bi_daddr, (long long)LFS_DBTOFSB(fs, b_daddr)));
			}
			do_again++;
			continue;
		}

		/*
		 * Check block sizes.  The blocks being cleaned come from
		 * disk, so they should have the same size as their on-disk
		 * counterparts.
		 */
		if (blkp->bi_lbn >= 0)
			obsize = lfs_blksize(fs, ip, blkp->bi_lbn);
		else
			obsize = lfs_sb_getbsize(fs);
		/* Check for fragment size change */
		if (blkp->bi_lbn >= 0 && blkp->bi_lbn < ULFS_NDADDR) {
			obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
		}
		if (obsize != blkp->bi_size) {
			DLOG((DLOG_CLEAN, "lfs_markv: ino %d lbn %lld wrong"
			      " size (%ld != %d), try again\n",
			      blkp->bi_inode, (long long)blkp->bi_lbn,
			      (long) obsize, blkp->bi_size));
			do_again++;
			continue;
		}

		/*
		 * If we get to here, then we are keeping the block.  If
		 * it is an indirect block, we want to actually put it
		 * in the buffer cache so that it can be updated in the
		 * finish_meta section.  If it's not, we need to
		 * allocate a fake buffer so that writeseg can perform
		 * the copyin and write the buffer.
		 */
		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
			/* Data Block */
			bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
					 blkp->bi_size, blkp->bi_bp);
			/* Pretend we used bread() to get it */
			bp->b_blkno = LFS_FSBTODB(fs, blkp->bi_daddr);
		} else {
			/* Indirect block or ifile */
			if (blkp->bi_size != lfs_sb_getbsize(fs) &&
			    ip->i_number != LFS_IFILE_INUM)
				panic("lfs_markv: partial indirect block?"
				    " size=%d\n", blkp->bi_size);
			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
			if (!(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
				/*
				 * The block in question was not found
				 * in the cache; i.e., the block that
				 * getblk() returned is empty.  So, we
				 * can (and should) copy in the
				 * contents, because we've already
				 * determined that this was the right
				 * version of this block on disk.
				 *
				 * And, it can't have changed underneath
				 * us, because we have the segment lock.
				 */
				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
				if (error)
					goto err2;
			}
		}
		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
			goto err2;

		nblkwritten++;
		/*
		 * XXX should account indirect blocks and ifile pages as well
		 */
		if (nblkwritten + lfs_lblkno(fs, ninowritten * sizeof (struct ulfs1_dinode))
		    > LFS_MARKV_MAX_BLOCKS) {
			DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos\n",
			      nblkwritten, ninowritten));
			lfs_segwrite(mntp, SEGM_CLEAN);
			nblkwritten = ninowritten = 0;
		}
	}

	/*
	 * Finish the old file, if there was one
	 */
	if (vp != NULL) {
		vput(vp);
		vp = NULL;
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_markv: numrefed=%d", numrefed);
#endif
	DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos (check point)\n",
	      nblkwritten, ninowritten));

	/*
	 * The last write has to be SEGM_SYNC, because of calling semantics.
	 * It also has to be SEGM_CKP, because otherwise we could write
	 * over the newly cleaned data contained in a checkpoint, and then
	 * we'd be unhappy at recovery time.
	 */
	lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	lfs_segunlock(fs);

	vfs_unbusy(mntp, false, NULL);
	if (error)
		return (error);
	else if (do_again)
		return EAGAIN;

	return 0;

 err2:
	DLOG((DLOG_CLEAN, "lfs_markv err2\n"));

	/*
	 * XXX we're here because copyin() failed.
	 * XXX it means that we can't trust the cleanerd.  too bad.
	 * XXX how can we recover from this?
	 */

 err3:
	/*
	 * XXX should do segwrite here anyway?
	 */

	if (vp != NULL) {
		vput(vp);
		vp = NULL;
		--numrefed;
	}

	lfs_segunlock(fs);
	vfs_unbusy(mntp, false, NULL);
#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_markv: numrefed=%d", numrefed);
#endif

	return (error);
}

/*
 * sys_lfs_bmapv:
 *
 * This will fill in the current disk address for arrays of blocks.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
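/*
 * In the usual cleaning cycle the cleaner calls lfs_bmapv() first, to learn
 * which of the blocks it found in a segment are still live (their current
 * address matches where they were found), and then passes the survivors to
 * lfs_markv() to be rewritten.  See the note in lfs_bmapv() below: when in
 * doubt it errs toward reporting a block as live and lets lfs_markv()
 * discard it.
 */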
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
 out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#else
int
sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((size_t) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
 out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#endif

int
lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ulfsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	daddr_t v_daddr;
	int cnt, error;
	int numrefed = 0;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOULFS(mntp);
	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

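	/*
	 * The first thread to call lfs_bmapv() on this mount registers
	 * itself as the cleaner; only that thread is expected to issue
	 * the cleaner syscalls from then on, which the KASSERT below
	 * checks.
	 */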
	if (ump->um_cleaner_thread == NULL)
		ump->um_cleaner_thread = curlwp;
	KASSERT(ump->um_cleaner_thread == curlwp);

	cnt = blkcnt;

	fs = VFSTOULFS(mntp)->um_lfs;

	error = 0;

	/* these were inside the initialization for the for loop */
	vp = NULL;
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.
			 */
			if (vp != NULL) {
				vput(vp);
				vp = NULL;
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = lfs_sb_getidaddr(fs);
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp, 0);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			error = lfs_fastvget(mntp, blkp->bi_inode, NULL,
			    LK_SHARED, &vp);
			if (error) {
				DLOG((DLOG_CLEAN, "lfs_bmapv: lfs_fastvget"
				      " ino %d failed with %d\n",
				      blkp->bi_inode, error));
				KASSERT(vp == NULL);
				continue;
			} else {
				KASSERT(VOP_ISLOCKED(vp));
				numrefed++;
			}
			ip = VTOI(vp);
		} else if (vp == NULL) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.	Note that we DO NOT set the
			 * bi_addr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			daddr_t bi_daddr;

			/* XXX ondisk32 */
			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &bi_daddr, NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = LFS_DBTOFSB(fs, bi_daddr);
			/* Fill in the block size, too */
			if (blkp->bi_lbn >= 0)
				blkp->bi_size = lfs_blksize(fs, ip, blkp->bi_lbn);
			else
				blkp->bi_size = lfs_sb_getbsize(fs);
		}
	}

	/*
	 * Finish the old file, if there was one.
	 */
	if (vp != NULL) {
		vput(vp);
		vp = NULL;
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_bmapv: numrefed=%d", numrefed);
#endif

	vfs_unbusy(mntp, false, NULL);

	return 0;
}

/*
 * sys_lfs_segclean:
 *
 * Mark the segment clean.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
int
sys_lfs_segclean(struct lwp *l, const struct sys_lfs_segclean_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */
	struct lfs *fs;
	struct mount *mntp;
	fsid_t fsid;
	int error;
	unsigned long segnum;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_SEGCLEAN, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);

	fs = VFSTOULFS(mntp)->um_lfs;
	segnum = SCARG(uap, segment);

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	KERNEL_LOCK(1, NULL);
	lfs_seglock(fs, SEGM_PROT);
	error = lfs_do_segclean(fs, segnum);
	lfs_segunlock(fs);
	KERNEL_UNLOCK_ONE(NULL);
	vfs_unbusy(mntp, false, NULL);
	return error;
}

/*
 * Actually mark the segment clean.
 * Must be called with the segment lock held.
 */
int
lfs_do_segclean(struct lfs *fs, unsigned long segnum)
{
	extern int lfs_dostats;
	struct buf *bp;
	CLEANERINFO *cip;
	SEGUSE *sup;

	if (lfs_dtosn(fs, lfs_sb_getcurseg(fs)) == segnum) {
		return (EBUSY);
	}

	LFS_SEGENTRY(sup, fs, segnum, bp);
	if (sup->su_nbytes) {
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " %d live bytes\n", segnum, sup->su_nbytes));
		brelse(bp, 0);
		return (EBUSY);
	}
	if (sup->su_flags & SEGUSE_ACTIVE) {
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " segment is active\n", segnum));
		brelse(bp, 0);
		return (EBUSY);
	}
	if (!(sup->su_flags & SEGUSE_DIRTY)) {
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " segment is already clean\n", segnum));
		brelse(bp, 0);
		return (EALREADY);
	}

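	/*
	 * Accounting: the whole segment becomes available again, except
	 * for space permanently taken by a superblock copy or, for
	 * segment 0 on newer filesystems, the disklabel pad.  The blocks
	 * that held segment summaries and inode blocks go back into
	 * bfree, and the corresponding metadata estimate is reduced.
	 */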
	lfs_sb_addavail(fs, lfs_segtod(fs, 1));
	if (sup->su_flags & SEGUSE_SUPERBLOCK)
		lfs_sb_subavail(fs, lfs_btofsb(fs, LFS_SBPAD));
	if (lfs_sb_getversion(fs) > 1 && segnum == 0 &&
	    lfs_sb_gets0addr(fs) < lfs_btofsb(fs, LFS_LABELPAD))
		lfs_sb_subavail(fs, lfs_btofsb(fs, LFS_LABELPAD) - lfs_sb_gets0addr(fs));
	mutex_enter(&lfs_lock);
	lfs_sb_addbfree(fs, sup->su_nsums * lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
		lfs_btofsb(fs, sup->su_ninos * lfs_sb_getibsize(fs)));
	lfs_sb_subdmeta(fs, sup->su_nsums * lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
		lfs_btofsb(fs, sup->su_ninos * lfs_sb_getibsize(fs)));
	if (lfs_sb_getdmeta(fs) < 0)
		lfs_sb_setdmeta(fs, 0);
	mutex_exit(&lfs_lock);
	sup->su_flags &= ~SEGUSE_DIRTY;
	LFS_WRITESEGENTRY(sup, fs, segnum, bp);

	LFS_CLEANERINFO(cip, fs, bp);
	++cip->clean;
	--cip->dirty;
	lfs_sb_setnclean(fs, cip->clean);
	mutex_enter(&lfs_lock);
	cip->bfree = lfs_sb_getbfree(fs);
	cip->avail = lfs_sb_getavail(fs) - fs->lfs_ravail - fs->lfs_favail;
	wakeup(&fs->lfs_availsleep);
	mutex_exit(&lfs_lock);
	(void) LFS_BWRITE_LOG(bp);

	if (lfs_dostats)
		++lfs_stats.segs_reclaimed;

	return (0);
}

/*
 * This will block until a segment in file system fsid is written.  A timeout
 * may be specified, which will wake the caller automatically when it expires.
 * An fsid of -1 means any file system, and a timeout of 0 means forever.
 */
int
lfs_segwait(fsid_t *fsidp, struct timeval *tv)
{
	struct mount *mntp;
	void *addr;
	u_long timeout;
	int error;

	KERNEL_LOCK(1, NULL);
	if (fsidp == NULL || (mntp = vfs_getvfs(fsidp)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOULFS(mntp)->um_lfs->lfs_nextsegsleep;
	/*
	 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
	 * XXX IS THAT WHAT IS INTENDED?
	 */
	timeout = tvtohz(tv);
	error = tsleep(addr, PCATCH | PVFS, "segment", timeout);
	KERNEL_UNLOCK_ONE(NULL);
	return (error == ERESTART ? EINTR : 0);
}

/*
 * sys_lfs_segwait:
 *
 * System call wrapper around lfs_segwait().
 *
 *  0 on success
 *  1 on timeout
 * -1/errno is returned on error.
 */
int
sys___lfs_segwait50(struct lwp *l, const struct sys___lfs_segwait50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */
	struct timeval atv;
	fsid_t fsid;
	int error;

	/* XXX need we be su to segwait? */
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_SEGWAIT, NULL, NULL, NULL);
	if (error)
		return (error);
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
	} else /* NULL or invalid */
		atv.tv_sec = atv.tv_usec = 0;
	return lfs_segwait(&fsid, &atv);
}

/*
 * VFS_VGET call specialized for the cleaner.  If the cleaner is
 * processing IINFO structures, it may have the ondisk inode already, so
 * don't go retrieving it again.
 *
 * Return the vnode referenced and locked.
 */

static int
lfs_fastvget(struct mount *mp, ino_t ino, BLOCK_INFO *blkp, int lk_flags,
	     struct vnode **vpp)
{
	struct ulfsmount *ump;
	int error;

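	/*
	 * Stash the cleaner's BLOCK_INFO (if any) on the mount point so
	 * that the vnode-load path can use the on-disk inode image the
	 * cleaner already read, instead of reading it again (see the
	 * comment above).  Cleared again as soon as vcache_get() returns.
	 */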
	ump = VFSTOULFS(mp);
	ump->um_cleaner_hint = blkp;
	error = vcache_get(mp, &ino, sizeof(ino), vpp);
	ump->um_cleaner_hint = NULL;
	if (error)
		return error;
	error = vn_lock(*vpp, lk_flags);
	if (error) {
		if (error == EBUSY)
			error = EAGAIN;
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}

	return 0;
}

/*
 * Make up a "fake" cleaner buffer, copy the data from userland into it.
 */
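/*
 * The buffer comes from lfs_newbuf() with type LFS_NB_CLEAN rather than
 * from the regular buffer cache: cleaned data only has to survive until
 * the next segment write, so it is not worth caching against the vnode.
 * Its iodone handler is expected to be lfs_callback (see the KDASSERT
 * below), which presumably releases it when the write completes.
 */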
struct buf *
lfs_fakebuf(struct lfs *fs, struct vnode *vp, int lbn, size_t size, void *uaddr)
{
	struct buf *bp;
	int error;

	KASSERT(VTOI(vp)->i_number != LFS_IFILE_INUM);

	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size, LFS_NB_CLEAN);
	error = copyin(uaddr, bp->b_data, size);
	if (error) {
		lfs_freebuf(fs, bp);
		return NULL;
	}
	KDASSERT(bp->b_iodone == lfs_callback);

#if 0
	mutex_enter(&lfs_lock);
	++fs->lfs_iocount;
	mutex_exit(&lfs_lock);
#endif
	bp->b_bufsize = size;
	bp->b_bcount = size;
	return (bp);
}