/*	$NetBSD: lfs_syscalls.c,v 1.16 1998/03/01 02:23:25 fvdl Exp $	*/

/*-
 * Copyright (c) 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
 */

#include "fs_lfs.h"	/* for prototypes in syscallargs.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <sys/syscallargs.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

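/*
 * Advance the segment's FINFO pointer past the current (completed) FINFO
 * and its block-number array, making room to start the next file's FINFO.
 */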
#define BUMP_FIP(SP) \
	(SP)->fip = (FINFO *) (&(SP)->fip->fi_blocks[(SP)->fip->fi_nblocks])

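/* Adjust the number of FINFO structures recorded in the segment summary. */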
#define INC_FINFO(SP)	++((SEGSUM *)((SP)->segsum))->ss_nfinfo
#define DEC_FINFO(SP)	--((SEGSUM *)((SP)->segsum))->ss_nfinfo

/*
 * Before committing to add something to a segment summary, make sure there
 * is enough room.  S is the bytes added to the summary.
 */
#define CHECK_SEG(s)						\
	if (sp->sum_bytes_left < (s)) {				\
		(void) lfs_writeseg(fs, sp);			\
	}

struct buf *lfs_fakebuf __P((struct vnode *, int, size_t, caddr_t));

int debug_cleaner = 0;
int clean_vnlocked = 0;
int clean_inlocked = 0;

/*
 * lfs_markv:
 *
 * This will mark inodes and blocks dirty, so they are written into the log.
 * It will block until all the blocks have been written.  The segment create
 * time passed in the block_info and inode_info structures is used to decide
 * if the data is valid for each block (in case some process dirtied a block
 * or inode that is being cleaned between the determination that a block is
 * live and the lfs_markv call).
 *
 * 0 on success
 * -1/errno is returned on error.
 */
int
lfs_markv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct lfs_markv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	struct segment *sp;
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp, **bpp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct vnode *vp;
	fsid_t fsid;
	void *start;
	ino_t lastino;
	ufs_daddr_t b_daddr, v_daddr;
	u_long bsize;
	int cnt, error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (EINVAL);

	cnt = SCARG(uap, blkcnt);
	start = malloc(cnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	error = copyin(SCARG(uap, blkiov), start, cnt * sizeof(BLOCK_INFO));
	if (error)
		goto err1;

	/* Mark blocks/inodes dirty. */
	fs = VFSTOUFS(mntp)->um_lfs;
	bsize = fs->lfs_bsize;
	error = 0;

	lfs_seglock(fs, SEGM_SYNC | SEGM_CLEAN);
	sp = fs->lfs_sp;
	for (v_daddr = LFS_UNUSED_DADDR, lastino = LFS_UNUSED_INUM,
	    blkp = start; cnt--; ++blkp) {
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			if (lastino != LFS_UNUSED_INUM) {
				/* Finish up last file */
				if (sp->fip->fi_nblocks == 0) {
					DEC_FINFO(sp);
					sp->sum_bytes_left +=
					    sizeof(FINFO) - sizeof(ufs_daddr_t);
				} else {
					lfs_updatemeta(sp);
					BUMP_FIP(sp);
				}

				lfs_writeinode(fs, sp, ip);
				lfs_vunref(vp);
			}

			/* Start a new file */
			CHECK_SEG(sizeof(FINFO));
			sp->sum_bytes_left -= sizeof(FINFO) - sizeof(ufs_daddr_t);
			INC_FINFO(sp);
			sp->start_lbp = &sp->fip->fi_blocks[0];
			sp->vp = NULL;
			sp->fip->fi_version = blkp->bi_version;
			sp->fip->fi_nblocks = 0;
			sp->fip->fi_ino = blkp->bi_inode;
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			if (v_daddr == LFS_UNUSED_DADDR)
				continue;

			/* Get the vnode/inode. */
			if (lfs_fastvget(mntp, blkp->bi_inode, v_daddr, &vp,
			    blkp->bi_lbn == LFS_UNUSED_LBN ?
			    blkp->bi_bp : NULL)) {
#ifdef DIAGNOSTIC
				printf("lfs_markv: VFS_VGET failed (%d)\n",
				    blkp->bi_inode);
				panic("lfs_markv VFS_VGET FAILED");
#endif
				lastino = LFS_UNUSED_INUM;
				v_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			sp->vp = vp;
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR)
			continue;

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN)
			continue;
		if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
		    b_daddr != blkp->bi_daddr)
			continue;
		/*
		 * If we got to here, then we are keeping the block.  If it
		 * is an indirect block, we want to actually put it in the
		 * buffer cache so that it can be updated in the finish_meta
		 * section.  If it's not, we need to allocate a fake buffer
		 * so that writeseg can perform the copyin and write the buffer.
		 */
		if (blkp->bi_lbn >= 0)	/* Data Block */
			bp = lfs_fakebuf(vp, blkp->bi_lbn, bsize,
			    blkp->bi_bp);
		else {
			bp = getblk(vp, blkp->bi_lbn, bsize, 0, 0);
			if (!(bp->b_flags & (B_DELWRI | B_DONE | B_CACHE)) &&
			    (error = copyin(blkp->bi_bp, bp->b_data,
			    blkp->bi_size)))
				goto err2;
			if ((error = VOP_BWRITE(bp)) != 0)
				goto err2;
		}
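		/*
		 * Loop until lfs_gatherblock() has taken the block into the
		 * current partial segment (it returns nonzero when it first
		 * has to write out a full segment and start a new one).
		 */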
		while (lfs_gatherblock(sp, bp, NULL));
	}
	if (sp->vp) {
		if (sp->fip->fi_nblocks == 0) {
			DEC_FINFO(sp);
			sp->sum_bytes_left +=
			    sizeof(FINFO) - sizeof(ufs_daddr_t);
		} else
			lfs_updatemeta(sp);

		lfs_writeinode(fs, sp, ip);
		lfs_vunref(vp);
	}
	(void) lfs_writeseg(fs, sp);
	lfs_segunlock(fs);
	free(start, M_SEGMENT);
	return (error);

	/*
	 * XXX
	 * If we come in to error 2, we might have indirect blocks that were
	 * updated and now have bad block pointers.  I don't know what to do
	 * about this.
	 */

err2:	lfs_vunref(vp);
	/* Free up fakebuffers */
	for (bpp = --sp->cbpp; bpp >= sp->bpp; --bpp)
		if ((*bpp)->b_flags & B_CALL) {
			brelvp(*bpp);
			free(*bpp, M_SEGMENT);
		} else
			brelse(*bpp);
	lfs_segunlock(fs);
err1:
	free(start, M_SEGMENT);
	return (error);
}

/*
 * lfs_bmapv:
 *
 * This will fill in the current disk address for arrays of blocks.
 *
 * 0 on success
 * -1/errno is returned on error.
 */
int
lfs_bmapv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct lfs_bmapv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkp;
	struct mount *mntp;
	struct ufsmount *ump;
	struct vnode *vp;
	fsid_t fsid;
	void *start;
	ufs_daddr_t daddr;
	int cnt, error, step;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t));
	if (error)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (EINVAL);

	cnt = SCARG(uap, blkcnt);
	start = blkp = malloc(cnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	error = copyin(SCARG(uap, blkiov), blkp, cnt * sizeof(BLOCK_INFO));
	if (error) {
		free(blkp, M_SEGMENT);
		return (error);
	}

	for (step = cnt; step--; ++blkp) {
		if (blkp->bi_lbn == LFS_UNUSED_LBN)
			continue;
		/*
		 * A regular call to VFS_VGET could deadlock
		 * here.  Instead, we try an unlocked access.
		 */
		ump = VFSTOUFS(mntp);
		if ((vp =
		    ufs_ihashlookup(ump->um_dev, blkp->bi_inode)) != NULL) {
			if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &daddr, NULL))
				daddr = LFS_UNUSED_DADDR;
		} else if (VFS_VGET(mntp, blkp->bi_inode, &vp))
			daddr = LFS_UNUSED_DADDR;
		else {
			if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &daddr, NULL))
				daddr = LFS_UNUSED_DADDR;
			vput(vp);
		}
		blkp->bi_daddr = daddr;
	}
	copyout(start, SCARG(uap, blkiov), cnt * sizeof(BLOCK_INFO));
	free(start, M_SEGMENT);
	return (0);
}
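
/*
 * Illustrative sketch (not compiled): a userland cleaner would typically
 * refresh the disk addresses of its candidate blocks with lfs_bmapv() and
 * then hand the still-live ones to lfs_markv(), e.g.:
 *
 *	BLOCK_INFO bi[NBLOCKS];
 *	... fill in bi_inode, bi_lbn, bi_daddr, bi_version, bi_bp, bi_size ...
 *	if (lfs_bmapv(&fsid, bi, NBLOCKS) == 0)
 *		(void)lfs_markv(&fsid, bi, NBLOCKS);
 *
 * The field names follow the BLOCK_INFO usage above; the exact userland
 * wrappers are assumed to mirror the syscall argument structures.
 */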

/*
 * lfs_segclean:
 *
 * Mark the segment clean.
 *
 * 0 on success
 * -1/errno is returned on error.
 */
int
lfs_segclean(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct lfs_segclean_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */ *uap = v;
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	struct mount *mntp;
	struct lfs *fs;
	fsid_t fsid;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (EINVAL);

	fs = VFSTOUFS(mntp)->um_lfs;

	if (datosn(fs, fs->lfs_curseg) == SCARG(uap, segment))
		return (EBUSY);

	LFS_SEGENTRY(sup, fs, SCARG(uap, segment), bp);
	if (sup->su_flags & SEGUSE_ACTIVE) {
		brelse(bp);
		return (EBUSY);
	}
	fs->lfs_avail += fsbtodb(fs, fs->lfs_ssize) - 1;
	fs->lfs_bfree += (sup->su_nsums * LFS_SUMMARY_SIZE / DEV_BSIZE) +
	    sup->su_ninos * btodb(fs->lfs_bsize);
	sup->su_flags &= ~SEGUSE_DIRTY;
	(void) VOP_BWRITE(bp);

	LFS_CLEANERINFO(cip, fs, bp);
	++cip->clean;
	--cip->dirty;
	(void) VOP_BWRITE(bp);
	wakeup(&fs->lfs_avail);
	return (0);
}

/*
 * lfs_segwait:
 *
 * This will block until a segment in file system fsid is written.  A timeout
 * given as a struct timeval may be specified, which will wake the cleaner
 * automatically when it expires.
 * An fsid of -1 means any file system, and a timeout of 0 means forever.
 *
 * 0 on success
 * 1 on timeout
 * -1/errno is returned on error.
 */
int
lfs_segwait(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct lfs_segwait_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */ *uap = v;
	extern int lfs_allclean_wakeup;
	struct mount *mntp;
	struct timeval atv;
	fsid_t fsid;
	void *addr;
	u_long timeout;
	int error, s;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) {
		return (error);
	}
#ifdef WHEN_QUADS_WORK
	if (error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t)))
		return (error);
	if (fsid == (fsid_t)-1)
		addr = &lfs_allclean_wakeup;
	else {
		if ((mntp = vfs_getvfs(&fsid)) == NULL)
			return (EINVAL);
		addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;
	}
#else
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;
#endif

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
		s = splclock();
		timeradd(&atv, &time, &atv);
		timeout = hzto(&atv);
		splx(s);
	} else
		timeout = 0;

	error = tsleep(addr, PCATCH | PUSER, "segment", timeout);
	return (error == ERESTART ? EINTR : 0);
}

/*
 * VFS_VGET call specialized for the cleaner.  The cleaner already knows the
 * daddr from the ifile, so don't look it up again.  If the cleaner is
 * processing IINFO structures, it may have the ondisk inode already, so
 * don't go retrieving it again.
 */
int
lfs_fastvget(mp, ino, daddr, vpp, dinp)
	struct mount *mp;
	ino_t ino;
	ufs_daddr_t daddr;
	struct vnode **vpp;
	struct dinode *dinp;
{
	register struct inode *ip;
	struct vnode *vp;
	struct ufsmount *ump;
	struct buf *bp;
	dev_t dev;
	int error;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */
	if ((*vpp = ufs_ihashlookup(dev, ino)) != NULL) {
		lfs_vref(*vpp);
		if ((*vpp)->v_flag & VXLOCK)
			clean_vnlocked++;
		ip = VTOI(*vpp);
		if (lockstatus(&ip->i_lock))
			clean_inlocked++;
		if (!(ip->i_flag & IN_MODIFIED))
			++ump->um_lfs->lfs_uinodes;
		ip->i_flag |= IN_MODIFIED;
		return (0);
	}

	/* Allocate new vnode/inode. */
	if ((error = lfs_vcreate(mp, ino, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ufs_ihashins(ip);

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = ump->um_lfs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (dinp) {
		error = copyin(dinp, &ip->i_din.ffs_din, sizeof(struct dinode));
		if (error)
			return (error);
	} else {
		error = bread(ump->um_devvp, daddr,
		    (int)ump->um_lfs->lfs_bsize, NOCRED, &bp);
		if (error) {
			/*
			 * The inode does not contain anything useful, so it
			 * would be misleading to leave it on its hash chain.
			 * Iput() will return it to the free list.
			 */
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lfs_vunref(vp);
			brelse(bp);
			*vpp = NULL;
			return (error);
		}
		ip->i_din.ffs_din =
		    *lfs_ifind(ump->um_lfs, ino, (struct dinode *)bp->b_data);
		brelse(bp);
	}

	/*
	 * Initialize the vnode from the inode, check for aliases.  In all
	 * cases re-init ip, the underlying vnode/inode may have changed.
	 */
	error = ufs_vinit(mp, lfs_specop_p, LFS_FIFOOPS, &vp);
	if (error) {
		lfs_vunref(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	ip->i_flag |= IN_MODIFIED;
	++ump->um_lfs->lfs_uinodes;
	VREF(ip->i_devvp);
	*vpp = vp;
	return (0);
}
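
/*
 * Build a "fake" buffer header for a data block being cleaned: b_saveaddr
 * records the cleaner's user-space copy (uaddr) so that lfs_writeseg() can
 * copy the data in when the segment is actually written (see the fake-buffer
 * comment in lfs_markv() above).
 */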
struct buf *
lfs_fakebuf(vp, lbn, size, uaddr)
	struct vnode *vp;
	int lbn;
	size_t size;
	caddr_t uaddr;
{
	struct buf *bp;

	bp = lfs_newbuf(vp, lbn, 0);
	bp->b_saveaddr = uaddr;
	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_flags |= B_INVAL;
	return (bp);
}