1 /* $NetBSD: lfs.c,v 1.51 2015/08/02 18:14:16 dholland Exp $ */
2 /*-
3 * Copyright (c) 2003 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Konrad E. Schroder <perseant (at) hhhh.org>.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30 /*
31 * Copyright (c) 1989, 1991, 1993
32 * The Regents of the University of California. All rights reserved.
33 * (c) UNIX System Laboratories, Inc.
34 * All or some portions of this file are derived from material licensed
35 * to the University of California by American Telephone and Telegraph
36 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
37 * the permission of UNIX System Laboratories, Inc.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * @(#)ufs_bmap.c 8.8 (Berkeley) 8/11/95
64 */
65
66
67 #include <sys/types.h>
68 #include <sys/param.h>
69 #include <sys/time.h>
70 #include <sys/buf.h>
71 #include <sys/mount.h>
72
73 #define vnode uvnode
74 #include <ufs/lfs/lfs.h>
75 #include <ufs/lfs/lfs_inode.h>
76 #include <ufs/lfs/lfs_accessors.h>
77 #undef vnode
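/*
 * Note: the #define above makes struct vnode references inside the
 * kernel LFS headers resolve to struct uvnode, the userland stand-in
 * used throughout this file.
 */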
78
79 #include <assert.h>
80 #include <err.h>
81 #include <errno.h>
82 #include <stdarg.h>
83 #include <stdio.h>
84 #include <stdlib.h>
85 #include <string.h>
86 #include <unistd.h>
87 #include <util.h>
88
89 #include "bufcache.h"
90 #include "vnode.h"
91 #include "lfs_user.h"
92 #include "segwrite.h"
93 #include "kernelops.h"
94
95 #define panic call_panic
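/* panic() in this adapted kernel code is redirected to call_panic() below. */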
96
97 extern u_int32_t cksum(void *, size_t);
98 extern u_int32_t lfs_sb_cksum(struct dlfs *);
99 extern void pwarn(const char *, ...);
100
101 extern struct uvnodelst vnodelist;
102 extern struct uvnodelst getvnodelist[VNODE_HASH_MAX];
103 extern int nvnodes;
104
105 long dev_bsize = DEV_BSIZE;
106
107 static int
108 lfs_fragextend(struct uvnode *, int, int, daddr_t, struct ubuf **);
109
110 int fsdirty = 0;
111 void (*panic_func)(int, const char *, va_list) = my_vpanic;
112
113 /*
114 * LFS buffer and uvnode operations
115 */
116
117 int
118 lfs_vop_strategy(struct ubuf * bp)
119 {
120 int count;
121
122 if (bp->b_flags & B_READ) {
123 count = kops.ko_pread(bp->b_vp->v_fd, bp->b_data, bp->b_bcount,
124 bp->b_blkno * dev_bsize);
125 if (count == bp->b_bcount)
126 bp->b_flags |= B_DONE;
127 } else {
128 count = kops.ko_pwrite(bp->b_vp->v_fd, bp->b_data, bp->b_bcount,
129 bp->b_blkno * dev_bsize);
130 		if (count < 0) {
131 perror("pwrite");
132 return -1;
133 }
134 bp->b_flags &= ~B_DELWRI;
135 reassignbuf(bp, bp->b_vp);
136 }
137 return 0;
138 }
139
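/*
 * Delayed-write a buffer: charge its space against lfs_avail the first
 * time it becomes dirty, then mark it B_DELWRI|B_LOCKED, reassign it to
 * the vnode's dirty list and release it.
 */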
140 int
141 lfs_vop_bwrite(struct ubuf * bp)
142 {
143 struct lfs *fs;
144
145 fs = bp->b_vp->v_fs;
146 if (!(bp->b_flags & B_DELWRI)) {
147 lfs_sb_subavail(fs, lfs_btofsb(fs, bp->b_bcount));
148 }
149 bp->b_flags |= B_DELWRI | B_LOCKED;
150 reassignbuf(bp, bp->b_vp);
151 brelse(bp, 0);
152 return 0;
153 }
154
155 /*
156 * ulfs_bmaparray does the bmap conversion, and if requested returns the
157 * array of logical blocks which must be traversed to get to a block.
158 * Each entry contains the offset into that block that gets you to the
159 * next block and the disk address of the block (if it is assigned).
160 */
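/*
 * On return *bnp holds the disk address in DEV_BSIZE units (via
 * LFS_FSBTODB), or -1 if no address has been assigned yet; *nump, if
 * non-NULL, is set to the number of indir entries filled in (0 for a
 * direct block).
 */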
161 int
162 ulfs_bmaparray(struct lfs * fs, struct uvnode * vp, daddr_t bn, daddr_t * bnp, struct indir * ap, int *nump)
163 {
164 struct inode *ip;
165 struct ubuf *bp;
166 struct indir a[ULFS_NIADDR + 1], *xap;
167 daddr_t daddr;
168 daddr_t metalbn;
169 int error, num;
170
171 ip = VTOI(vp);
172
173 if (bn >= 0 && bn < ULFS_NDADDR) {
174 if (nump != NULL)
175 *nump = 0;
176 *bnp = LFS_FSBTODB(fs, ip->i_ffs1_db[bn]);
177 if (*bnp == 0)
178 *bnp = -1;
179 return (0);
180 }
181 xap = ap == NULL ? a : ap;
182 if (!nump)
183 nump = #
184 if ((error = ulfs_getlbns(fs, vp, bn, xap, nump)) != 0)
185 return (error);
186
187 num = *nump;
188
189 /* Get disk address out of indirect block array */
190 daddr = ip->i_ffs1_ib[xap->in_off];
191
192 for (bp = NULL, ++xap; --num; ++xap) {
193 /* Exit the loop if there is no disk address assigned yet and
194 * the indirect block isn't in the cache, or if we were
195 * looking for an indirect block and we've found it. */
196
197 metalbn = xap->in_lbn;
198 if ((daddr == 0 && !incore(vp, metalbn)) || metalbn == bn)
199 break;
200 /*
201 * If we get here, we've either got the block in the cache
202 * or we have a disk address for it, go fetch it.
203 */
204 if (bp)
205 brelse(bp, 0);
206
207 xap->in_exists = 1;
208 bp = getblk(vp, metalbn, lfs_sb_getbsize(fs));
209
210 if (!(bp->b_flags & (B_DONE | B_DELWRI))) {
211 bp->b_blkno = LFS_FSBTODB(fs, daddr);
212 bp->b_flags |= B_READ;
213 VOP_STRATEGY(bp);
214 }
215 daddr = ((ulfs_daddr_t *) bp->b_data)[xap->in_off];
216 }
217 if (bp)
218 brelse(bp, 0);
219
220 daddr = LFS_FSBTODB(fs, (ulfs_daddr_t) daddr);
221 *bnp = daddr == 0 ? -1 : daddr;
222 return (0);
223 }
224
225 /*
226 * Create an array of logical block number/offset pairs which represent the
227 * path of indirect blocks required to access a data block. The first "pair"
228 * contains the logical block number of the appropriate single, double or
229 * triple indirect block and the offset into the inode indirect block array.
230 * Note, the logical block number of the inode single/double/triple indirect
231 * block appears twice in the array, once with the offset into the i_ffs1_ib and
232 * once with the offset into the page itself.
233 */
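/*
 * For example, a block reached through the double indirect chain yields
 * *nump == 3: the first pair has in_off == 1 (the double indirect slot
 * of i_ffs1_ib), followed by one pair per indirect block traversed.
 */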
234 int
235 ulfs_getlbns(struct lfs * fs, struct uvnode * vp, daddr_t bn, struct indir * ap, int *nump)
236 {
237 daddr_t metalbn, realbn;
238 int64_t blockcnt;
239 int lbc;
240 int i, numlevels, off;
241 int lognindir, indir;
242
243 metalbn = 0; /* XXXGCC -Wuninitialized [sh3] */
244
245 if (nump)
246 *nump = 0;
247 numlevels = 0;
248 realbn = bn;
249 if (bn < 0)
250 bn = -bn;
251
252 lognindir = -1;
253 for (indir = lfs_sb_getnindir(fs); indir; indir >>= 1)
254 ++lognindir;
255
256 /* Determine the number of levels of indirection. After this loop is
257 * done, blockcnt indicates the number of data blocks possible at the
258 * given level of indirection, and ULFS_NIADDR - i is the number of levels
259 * of indirection needed to locate the requested block. */
260
261 bn -= ULFS_NDADDR;
262 for (lbc = 0, i = ULFS_NIADDR;; i--, bn -= blockcnt) {
263 if (i == 0)
264 return (EFBIG);
265
266 lbc += lognindir;
267 blockcnt = (int64_t) 1 << lbc;
268
269 if (bn < blockcnt)
270 break;
271 }
272
273 /* Calculate the address of the first meta-block. */
274 metalbn = -((realbn >= 0 ? realbn : -realbn) - bn + ULFS_NIADDR - i);
275
276 /* At each iteration, off is the offset into the bap array which is an
277 * array of disk addresses at the current level of indirection. The
278 * logical block number and the offset in that block are stored into
279 * the argument array. */
280 ap->in_lbn = metalbn;
281 ap->in_off = off = ULFS_NIADDR - i;
282 ap->in_exists = 0;
283 ap++;
284 for (++numlevels; i <= ULFS_NIADDR; i++) {
285 /* If searching for a meta-data block, quit when found. */
286 if (metalbn == realbn)
287 break;
288
289 lbc -= lognindir;
290 blockcnt = (int64_t) 1 << lbc;
291 off = (bn >> lbc) & (lfs_sb_getnindir(fs) - 1);
292
293 ++numlevels;
294 ap->in_lbn = metalbn;
295 ap->in_off = off;
296 ap->in_exists = 0;
297 ++ap;
298
299 metalbn -= -1 + (off << lbc);
300 }
301 if (nump)
302 *nump = numlevels;
303 return (0);
304 }
305
306 int
307 lfs_vop_bmap(struct uvnode * vp, daddr_t lbn, daddr_t * daddrp)
308 {
309 return ulfs_bmaparray(vp->v_fs, vp, lbn, daddrp, NULL, NULL);
310 }
311
312 /* Search a block for a specific dinode. */
313 struct ulfs1_dinode *
314 lfs_ifind(struct lfs * fs, ino_t ino, struct ubuf * bp)
315 {
316 struct ulfs1_dinode *dip = (struct ulfs1_dinode *) bp->b_data;
317 struct ulfs1_dinode *ldip, *fin;
318
319 fin = dip + LFS_INOPB(fs);
320
321 /*
322 * Read the inode block backwards, since later versions of the
323 	 * inode will supersede earlier ones.  Though it is unlikely, it is
324 	 * possible for the same inode to appear more than once in one inode block.
325 */
326 for (ldip = fin - 1; ldip >= dip; --ldip)
327 if (ldip->di_inumber == ino)
328 return (ldip);
329 return NULL;
330 }
331
332 /*
333 * lfs_raw_vget makes us a new vnode from the inode at the given disk address.
334 * XXX it currently loses atime information.
335 */
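/*
 * If daddr is zero no inode block is read and the caller gets a vnode
 * whose dinode is all zeroes (this is how lfs_valloc and the dummy-read
 * path in lfs_init use it).
 */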
336 struct uvnode *
337 lfs_raw_vget(struct lfs * fs, ino_t ino, int fd, ulfs_daddr_t daddr)
338 {
339 struct uvnode *vp;
340 struct inode *ip;
341 struct ulfs1_dinode *dip;
342 struct ubuf *bp;
343 int i, hash;
344
345 vp = ecalloc(1, sizeof(*vp));
346 vp->v_fd = fd;
347 vp->v_fs = fs;
348 vp->v_usecount = 0;
349 vp->v_strategy_op = lfs_vop_strategy;
350 vp->v_bwrite_op = lfs_vop_bwrite;
351 vp->v_bmap_op = lfs_vop_bmap;
352 LIST_INIT(&vp->v_cleanblkhd);
353 LIST_INIT(&vp->v_dirtyblkhd);
354
355 ip = ecalloc(1, sizeof(*ip));
356
357 ip->i_din.ffs1_din = ecalloc(1, sizeof(*ip->i_din.ffs1_din));
358
359 /* Initialize the inode -- from lfs_vcreate. */
360 ip->inode_ext.lfs = ecalloc(1, sizeof(*ip->inode_ext.lfs));
361 vp->v_data = ip;
362 /* ip->i_vnode = vp; */
363 ip->i_number = ino;
364 ip->i_lockf = 0;
365 ip->i_lfs_effnblks = 0;
366 ip->i_flag = 0;
367
368 /* Load inode block and find inode */
369 if (daddr > 0) {
370 bread(fs->lfs_devvp, LFS_FSBTODB(fs, daddr), lfs_sb_getibsize(fs),
371 0, &bp);
372 bp->b_flags |= B_AGE;
373 dip = lfs_ifind(fs, ino, bp);
374 		if (dip == NULL) {
375 			brelse(bp, 0);
			free(ip->inode_ext.lfs);
			free(ip->i_din.ffs1_din);
376 			free(ip);
377 			free(vp);
378 			return NULL;
379 		}
380 memcpy(ip->i_din.ffs1_din, dip, sizeof(*dip));
381 brelse(bp, 0);
382 }
383 ip->i_number = ino;
384 /* ip->i_devvp = fs->lfs_devvp; */
385 ip->i_lfs = fs;
386
387 ip->i_lfs_effnblks = ip->i_ffs1_blocks;
388 ip->i_lfs_osize = ip->i_ffs1_size;
389 #if 0
390 if (fs->lfs_version > 1) {
391 ip->i_ffs1_atime = ts.tv_sec;
392 ip->i_ffs1_atimensec = ts.tv_nsec;
393 }
394 #endif
395
396 memset(ip->i_lfs_fragsize, 0, ULFS_NDADDR * sizeof(*ip->i_lfs_fragsize));
397 for (i = 0; i < ULFS_NDADDR; i++)
398 if (ip->i_ffs1_db[i] != 0)
399 ip->i_lfs_fragsize[i] = lfs_blksize(fs, ip, i);
400
401 ++nvnodes;
402 hash = ((int)(intptr_t)fs + ino) & (VNODE_HASH_MAX - 1);
403 LIST_INSERT_HEAD(&getvnodelist[hash], vp, v_getvnodes);
404 LIST_INSERT_HEAD(&vnodelist, vp, v_mntvnodes);
405
406 return vp;
407 }
408
409 static struct uvnode *
410 lfs_vget(void *vfs, ino_t ino)
411 {
412 struct lfs *fs = (struct lfs *)vfs;
413 ulfs_daddr_t daddr;
414 struct ubuf *bp;
415 IFILE *ifp;
416
417 LFS_IENTRY(ifp, fs, ino, bp);
418 daddr = ifp->if_daddr;
419 brelse(bp, 0);
420 if (daddr <= 0 || lfs_dtosn(fs, daddr) >= lfs_sb_getnseg(fs))
421 return NULL;
422 return lfs_raw_vget(fs, ino, fs->lfs_ivnode->v_fd, daddr);
423 }
424
425 /* Check superblock magic number and checksum */
426 static int
427 check_sb(struct lfs *fs)
428 {
429 u_int32_t checksum;
430
431 if (fs->lfs_magic != LFS_MAGIC) {
432 printf("Superblock magic number (0x%lx) does not match "
433 "expected 0x%lx\n", (unsigned long) fs->lfs_magic,
434 (unsigned long) LFS_MAGIC);
435 return 1;
436 }
437 /* checksum */
438 checksum = lfs_sb_cksum(&(fs->lfs_dlfs));
439 if (lfs_sb_getcksum(fs) != checksum) {
440 printf("Superblock checksum (%lx) does not match computed checksum (%lx)\n",
441 (unsigned long) lfs_sb_getcksum(fs), (unsigned long) checksum);
442 return 1;
443 }
444 return 0;
445 }
446
447 /* Initialize LFS library; load superblocks and choose which to use. */
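/*
 * sblkno and idaddr, when nonzero, override the superblock location and
 * the ifile inode address respectively (cf. the -b handling below); with
 * dummy_read set no superblock is read and a mostly empty fs is returned.
 */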
448 struct lfs *
449 lfs_init(int devfd, daddr_t sblkno, daddr_t idaddr, int dummy_read, int debug)
450 {
451 struct uvnode *devvp;
452 struct ubuf *bp;
453 int tryalt;
454 struct lfs *fs, *altfs;
455
456 vfs_init();
457
458 devvp = ecalloc(1, sizeof(*devvp));
459 devvp->v_fs = NULL;
460 devvp->v_fd = devfd;
461 devvp->v_strategy_op = raw_vop_strategy;
462 devvp->v_bwrite_op = raw_vop_bwrite;
463 devvp->v_bmap_op = raw_vop_bmap;
464 LIST_INIT(&devvp->v_cleanblkhd);
465 LIST_INIT(&devvp->v_dirtyblkhd);
466
467 tryalt = 0;
468 if (dummy_read) {
469 if (sblkno == 0)
470 sblkno = LFS_LABELPAD / dev_bsize;
471 fs = ecalloc(1, sizeof(*fs));
472 fs->lfs_devvp = devvp;
473 } else {
474 if (sblkno == 0) {
475 sblkno = LFS_LABELPAD / dev_bsize;
476 tryalt = 1;
477 } else if (debug) {
478 			printf("-b flag given, not attempting to verify checkpoint\n");
479 }
480
481 dev_bsize = DEV_BSIZE;
482
483 (void)bread(devvp, sblkno, LFS_SBPAD, 0, &bp);
484 fs = ecalloc(1, sizeof(*fs));
485 fs->lfs_dlfs = *((struct dlfs *) bp->b_data);
486 fs->lfs_devvp = devvp;
487 bp->b_flags |= B_INVAL;
488 brelse(bp, 0);
489
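		/* Re-derive the device block size from the on-disk superblock. */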
490 dev_bsize = lfs_sb_getfsize(fs) >> lfs_sb_getfsbtodb(fs);
491
492 if (tryalt) {
493 (void)bread(devvp, LFS_FSBTODB(fs, lfs_sb_getsboff(fs, 1)),
494 LFS_SBPAD, 0, &bp);
495 altfs = ecalloc(1, sizeof(*altfs));
496 altfs->lfs_dlfs = *((struct dlfs *) bp->b_data);
497 altfs->lfs_devvp = devvp;
498 bp->b_flags |= B_INVAL;
499 brelse(bp, 0);
500
501 if (check_sb(fs) || lfs_sb_getidaddr(fs) <= 0) {
502 if (debug)
503 printf("Primary superblock is no good, using first alternate\n");
504 free(fs);
505 fs = altfs;
506 } else {
507 /* If both superblocks check out, try verification */
508 if (check_sb(altfs)) {
509 if (debug)
510 printf("First alternate superblock is no good, using primary\n");
511 free(altfs);
512 } else {
513 if (lfs_verify(fs, altfs, devvp, debug) == fs) {
514 free(altfs);
515 } else {
516 free(fs);
517 fs = altfs;
518 }
519 }
520 }
521 }
522 if (check_sb(fs)) {
523 free(fs);
524 return NULL;
525 }
526 }
527
528 /* Compatibility */
529 if (lfs_sb_getversion(fs) < 2) {
530 lfs_sb_setsumsize(fs, LFS_V1_SUMMARY_SIZE);
531 lfs_sb_setibsize(fs, lfs_sb_getbsize(fs));
532 lfs_sb_sets0addr(fs, lfs_sb_getsboff(fs, 0));
533 lfs_sb_settstamp(fs, lfs_sb_getotstamp(fs));
534 lfs_sb_setfsbtodb(fs, 0);
535 }
536
537 if (!dummy_read) {
538 fs->lfs_suflags = emalloc(2 * sizeof(u_int32_t *));
539 fs->lfs_suflags[0] = emalloc(lfs_sb_getnseg(fs) * sizeof(u_int32_t));
540 fs->lfs_suflags[1] = emalloc(lfs_sb_getnseg(fs) * sizeof(u_int32_t));
541 }
542
543 if (idaddr == 0)
544 idaddr = lfs_sb_getidaddr(fs);
545 else
546 lfs_sb_setidaddr(fs, idaddr);
547 /* NB: If dummy_read!=0, idaddr==0 here so we get a fake inode. */
548 fs->lfs_ivnode = lfs_raw_vget(fs,
549 (dummy_read ? LFS_IFILE_INUM : lfs_sb_getifile(fs)),
550 devvp->v_fd, idaddr);
551 if (fs->lfs_ivnode == NULL)
552 return NULL;
553
554 register_vget((void *)fs, lfs_vget);
555
556 return fs;
557 }
558
559 /*
560 * Check partial segment validity between fs->lfs_offset and the given goal.
561 *
562 * If goal == 0, just keep on going until the segments stop making sense,
563 * and return the address of the last valid partial segment.
564 *
565 * If goal != 0, return the address of the first partial segment that failed,
566 * or "goal" if we reached it without failure (the partial segment *at* goal
567 * need not be valid).
568 */
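/*
 * The scan starts at lfs_offset, skips over superblock locations, and
 * stops at the first summary whose magic, ident, serial or checksums do
 * not verify (check_summary() below validates the data checksum).
 */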
569 ulfs_daddr_t
570 try_verify(struct lfs *osb, struct uvnode *devvp, ulfs_daddr_t goal, int debug)
571 {
572 ulfs_daddr_t daddr, odaddr;
573 SEGSUM *sp;
574 int i, bc, hitclean;
575 struct ubuf *bp;
576 ulfs_daddr_t nodirop_daddr;
577 u_int64_t serial;
578
579 bc = 0;
580 hitclean = 0;
581 odaddr = -1;
582 daddr = lfs_sb_getoffset(osb);
583 nodirop_daddr = daddr;
584 serial = lfs_sb_getserial(osb);
585 while (daddr != goal) {
586 /*
587 * Don't mistakenly read a superblock, if there is one here.
588 */
589 if (lfs_sntod(osb, lfs_dtosn(osb, daddr)) == daddr) {
590 if (daddr == lfs_sb_gets0addr(osb))
591 daddr += lfs_btofsb(osb, LFS_LABELPAD);
592 for (i = 0; i < LFS_MAXNUMSB; i++) {
593 if (lfs_sb_getsboff(osb, i) < daddr)
594 break;
595 if (lfs_sb_getsboff(osb, i) == daddr)
596 daddr += lfs_btofsb(osb, LFS_SBPAD);
597 }
598 }
599
600 /* Read in summary block */
601 bread(devvp, LFS_FSBTODB(osb, daddr), lfs_sb_getsumsize(osb),
602 0, &bp);
603 sp = (SEGSUM *)bp->b_data;
604
605 /*
606 * Check for a valid segment summary belonging to our fs.
607 */
608 if (sp->ss_magic != SS_MAGIC ||
609 sp->ss_ident != lfs_sb_getident(osb) ||
610 sp->ss_serial < serial || /* XXX strengthen this */
611 sp->ss_sumsum != cksum(&sp->ss_datasum, lfs_sb_getsumsize(osb) -
612 sizeof(sp->ss_sumsum))) {
613 brelse(bp, 0);
614 if (debug) {
615 if (sp->ss_magic != SS_MAGIC)
616 pwarn("pseg at 0x%jx: "
617 "wrong magic number\n",
618 (uintmax_t)daddr);
619 else if (sp->ss_ident != lfs_sb_getident(osb))
620 pwarn("pseg at 0x%jx: "
621 "expected ident %jx, got %jx\n",
622 (uintmax_t)daddr,
623 (uintmax_t)sp->ss_ident,
624 (uintmax_t)lfs_sb_getident(osb));
625 				else if (sp->ss_serial < serial)
626 pwarn("pseg at 0x%jx: "
627 "serial %d < %d\n",
628 (uintmax_t)daddr,
629 (int)sp->ss_serial, (int)serial);
630 else
631 pwarn("pseg at 0x%jx: "
632 "summary checksum wrong\n",
633 (uintmax_t)daddr);
634 }
635 break;
636 }
637 if (debug && sp->ss_serial != serial)
638 pwarn("warning, serial=%d ss_serial=%d\n",
639 (int)serial, (int)sp->ss_serial);
640 ++serial;
641 bc = check_summary(osb, sp, daddr, debug, devvp, NULL);
642 if (bc == 0) {
643 brelse(bp, 0);
644 break;
645 }
646 if (debug)
647 			pwarn("summary good: 0x%jx/%d\n", (uintmax_t)daddr,
648 (int)sp->ss_serial);
649 assert (bc > 0);
650 odaddr = daddr;
651 daddr += lfs_btofsb(osb, lfs_sb_getsumsize(osb) + bc);
652 if (lfs_dtosn(osb, odaddr) != lfs_dtosn(osb, daddr) ||
653 lfs_dtosn(osb, daddr) != lfs_dtosn(osb, daddr +
654 lfs_btofsb(osb, lfs_sb_getsumsize(osb) + lfs_sb_getbsize(osb)) - 1)) {
655 daddr = sp->ss_next;
656 }
657
658 /*
659 * Check for the beginning and ending of a sequence of
660 * dirops. Writes from the cleaner never involve new
661 * information, and are always checkpoints; so don't try
662 * to roll forward through them. Likewise, psegs written
663 * by a previous roll-forward attempt are not interesting.
664 */
665 if (sp->ss_flags & (SS_CLEAN | SS_RFW))
666 hitclean = 1;
667 if (hitclean == 0 && (sp->ss_flags & SS_CONT) == 0)
668 nodirop_daddr = daddr;
669
670 brelse(bp, 0);
671 }
672
673 if (goal == 0)
674 return nodirop_daddr;
675 else
676 return daddr;
677 }
678
679 /* Use try_verify to check whether the newer superblock is valid. */
680 struct lfs *
681 lfs_verify(struct lfs *sb0, struct lfs *sb1, struct uvnode *devvp, int debug)
682 {
683 ulfs_daddr_t daddr;
684 struct lfs *osb, *nsb;
685
686 /*
687 * Verify the checkpoint of the newer superblock,
688 * if the timestamp/serial number of the two superblocks is
689 * different.
690 */
691
692 osb = NULL;
693 if (debug)
694 		pwarn("sb0 %ju, sb1 %ju\n",
695 (uintmax_t) lfs_sb_getserial(sb0),
696 (uintmax_t) lfs_sb_getserial(sb1));
697
698 if ((lfs_sb_getversion(sb0) == 1 &&
699 lfs_sb_getotstamp(sb0) != lfs_sb_getotstamp(sb1)) ||
700 (lfs_sb_getversion(sb0) > 1 &&
701 lfs_sb_getserial(sb0) != lfs_sb_getserial(sb1))) {
702 if (lfs_sb_getversion(sb0) == 1) {
703 if (lfs_sb_getotstamp(sb0) > lfs_sb_getotstamp(sb1)) {
704 osb = sb1;
705 nsb = sb0;
706 } else {
707 osb = sb0;
708 nsb = sb1;
709 }
710 } else {
711 if (lfs_sb_getserial(sb0) > lfs_sb_getserial(sb1)) {
712 osb = sb1;
713 nsb = sb0;
714 } else {
715 osb = sb0;
716 nsb = sb1;
717 }
718 }
719 if (debug) {
720 printf("Attempting to verify newer checkpoint...");
721 fflush(stdout);
722 }
723 daddr = try_verify(osb, devvp, lfs_sb_getoffset(nsb), debug);
724
725 if (debug)
726 printf("done.\n");
727 if (daddr == lfs_sb_getoffset(nsb)) {
728 pwarn("** Newer checkpoint verified; recovered %jd seconds of data\n",
729 (intmax_t)(lfs_sb_gettstamp(nsb) - lfs_sb_gettstamp(osb)));
730 sbdirty();
731 } else {
732 pwarn("** Newer checkpoint invalid; lost %jd seconds of data\n", (intmax_t)(lfs_sb_gettstamp(nsb) - lfs_sb_gettstamp(osb)));
733 }
734 return (daddr == lfs_sb_getoffset(nsb) ? nsb : osb);
735 }
736 /* Nothing to check */
737 return osb;
738 }
739
740 /* Verify a partial-segment summary; return the number of bytes on disk. */
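/*
 * A zero return means the partial segment should not be trusted: the
 * FINFOs overran the summary block, the data checksum did not match, or
 * the summary described no data at all.
 */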
741 int
742 check_summary(struct lfs *fs, SEGSUM *sp, ulfs_daddr_t pseg_addr, int debug,
743 struct uvnode *devvp, void (func(ulfs_daddr_t, FINFO *)))
744 {
745 FINFO *fp;
746 int bc; /* Bytes in partial segment */
747 int nblocks;
748 ulfs_daddr_t daddr;
749 ulfs_daddr_t *dp, *idp;
750 struct ubuf *bp;
751 int i, j, k, datac, len;
752 u_int32_t *datap;
753 u_int32_t ccksum;
754
755 /* We've already checked the sumsum, just do the data bounds and sum */
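	/*
	 * The data checksum covers only the first 32-bit word of each block
	 * in the partial segment; those words are gathered into datap[].
	 */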
756
757 /* Count the blocks. */
758 nblocks = howmany(sp->ss_ninos, LFS_INOPB(fs));
759 bc = nblocks << (lfs_sb_getversion(fs) > 1 ? lfs_sb_getffshift(fs) : lfs_sb_getbshift(fs));
760 assert(bc >= 0);
761
762 fp = (FINFO *) (sp + 1);
763 for (i = 0; i < sp->ss_nfinfo; i++) {
764 nblocks += fp->fi_nblocks;
765 bc += fp->fi_lastlength + ((fp->fi_nblocks - 1)
766 << lfs_sb_getbshift(fs));
767 assert(bc >= 0);
768 fp = (FINFO *) (fp->fi_blocks + fp->fi_nblocks);
769 if (((char *)fp) - (char *)sp > lfs_sb_getsumsize(fs))
770 return 0;
771 }
772 datap = emalloc(nblocks * sizeof(*datap));
773 datac = 0;
774
775 dp = (ulfs_daddr_t *) sp;
776 dp += lfs_sb_getsumsize(fs) / sizeof(ulfs_daddr_t);
777 dp--;
778
779 idp = dp;
780 daddr = pseg_addr + lfs_btofsb(fs, lfs_sb_getsumsize(fs));
781 fp = (FINFO *) (sp + 1);
782 for (i = 0, j = 0;
783 i < sp->ss_nfinfo || j < howmany(sp->ss_ninos, LFS_INOPB(fs)); i++) {
784 if (i >= sp->ss_nfinfo && *idp != daddr) {
785 pwarn("Not enough inode blocks in pseg at 0x%" PRIx32
786 ": found %d, wanted %d\n",
787 pseg_addr, j, howmany(sp->ss_ninos, LFS_INOPB(fs)));
788 if (debug)
789 pwarn("*idp=%x, daddr=%" PRIx32 "\n", *idp,
790 daddr);
791 break;
792 }
793 while (j < howmany(sp->ss_ninos, LFS_INOPB(fs)) && *idp == daddr) {
794 bread(devvp, LFS_FSBTODB(fs, daddr), lfs_sb_getibsize(fs),
795 0, &bp);
796 datap[datac++] = ((u_int32_t *) (bp->b_data))[0];
797 brelse(bp, 0);
798
799 ++j;
800 daddr += lfs_btofsb(fs, lfs_sb_getibsize(fs));
801 --idp;
802 }
803 if (i < sp->ss_nfinfo) {
804 if (func)
805 func(daddr, fp);
806 for (k = 0; k < fp->fi_nblocks; k++) {
807 len = (k == fp->fi_nblocks - 1 ?
808 fp->fi_lastlength
809 : lfs_sb_getbsize(fs));
810 bread(devvp, LFS_FSBTODB(fs, daddr), len,
811 0, &bp);
812 datap[datac++] = ((u_int32_t *) (bp->b_data))[0];
813 brelse(bp, 0);
814 daddr += lfs_btofsb(fs, len);
815 }
816 fp = (FINFO *) (fp->fi_blocks + fp->fi_nblocks);
817 }
818 }
819
820 if (datac != nblocks) {
821 		pwarn("Partial segment at 0x%jx: expected %d blocks, counted %d\n",
822 		    (uintmax_t)pseg_addr, nblocks, datac);
823 }
824 ccksum = cksum(datap, nblocks * sizeof(u_int32_t));
825 /* Check the data checksum */
826 if (ccksum != sp->ss_datasum) {
827 pwarn("Partial segment at 0x%jx data checksum"
828 " mismatch: given 0x%x, computed 0x%x\n",
829 (uintmax_t)pseg_addr, sp->ss_datasum, ccksum);
830 free(datap);
831 return 0;
832 }
833 free(datap);
834 assert(bc >= 0);
835 return bc;
836 }
837
838 /* print message and exit */
839 void
840 my_vpanic(int fatal, const char *fmt, va_list ap)
841 {
842 (void) vprintf(fmt, ap);
843 exit(8);
844 }
845
846 void
847 call_panic(const char *fmt, ...)
848 {
849 va_list ap;
850
851 va_start(ap, fmt);
852 panic_func(1, fmt, ap);
853 va_end(ap);
854 }
855
856 /* Allocate a new inode. */
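/*
 * Note that the vnode returned is for the caller-supplied ino, while the
 * entry unlinked from the free list is whatever inode number was at its
 * head; the two are presumably expected to coincide.
 */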
857 struct uvnode *
858 lfs_valloc(struct lfs *fs, ino_t ino)
859 {
860 struct ubuf *bp, *cbp;
861 struct ifile *ifp;
862 ino_t new_ino;
863 int error;
864 CLEANERINFO *cip;
865
866 /* Get the head of the freelist. */
867 LFS_GET_HEADFREE(fs, cip, cbp, &new_ino);
868
869 /*
870 * Remove the inode from the free list and write the new start
871 * of the free list into the superblock.
872 */
873 LFS_IENTRY(ifp, fs, new_ino, bp);
874 if (ifp->if_daddr != LFS_UNUSED_DADDR)
875 		panic("lfs_valloc: inuse inode %ju on the free list", (uintmax_t)new_ino);
876 LFS_PUT_HEADFREE(fs, cip, cbp, ifp->if_nextfree);
877
878 brelse(bp, 0);
879
880 /* Extend IFILE so that the next lfs_valloc will succeed. */
881 if (lfs_sb_getfreehd(fs) == LFS_UNUSED_INUM) {
882 if ((error = extend_ifile(fs)) != 0) {
883 LFS_PUT_HEADFREE(fs, cip, cbp, new_ino);
884 return NULL;
885 }
886 }
887
888 /* Set superblock modified bit and increment file count. */
889 sbdirty();
890 lfs_sb_addnfiles(fs, 1);
891
892 return lfs_raw_vget(fs, ino, fs->lfs_devvp->v_fd, 0x0);
893 }
894
895 #ifdef IN_FSCK_LFS
896 void reset_maxino(ino_t);
897 #endif
898
899 /*
900 * Add a new block to the Ifile, to accommodate future file creations.
901 */
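/*
 * The first cleansz + segtabsz blocks of the ifile hold the cleaner info
 * and segment usage table, so ifile block blkno contains inode numbers
 * starting at (blkno - segtabsz - cleansz) * ifpb.
 */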
902 int
903 extend_ifile(struct lfs *fs)
904 {
905 struct uvnode *vp;
906 struct inode *ip;
907 IFILE *ifp;
908 IFILE_V1 *ifp_v1;
909 struct ubuf *bp, *cbp;
910 daddr_t i, blkno, max;
911 ino_t oldlast;
912 CLEANERINFO *cip;
913
914 vp = fs->lfs_ivnode;
915 ip = VTOI(vp);
916 blkno = lfs_lblkno(fs, ip->i_ffs1_size);
917
918 lfs_balloc(vp, ip->i_ffs1_size, lfs_sb_getbsize(fs), &bp);
919 ip->i_ffs1_size += lfs_sb_getbsize(fs);
920 ip->i_flag |= IN_MODIFIED;
921
922 i = (blkno - lfs_sb_getsegtabsz(fs) - lfs_sb_getcleansz(fs)) *
923 lfs_sb_getifpb(fs);
924 LFS_GET_HEADFREE(fs, cip, cbp, &oldlast);
925 LFS_PUT_HEADFREE(fs, cip, cbp, i);
926 max = i + lfs_sb_getifpb(fs);
927 lfs_sb_subbfree(fs, lfs_btofsb(fs, lfs_sb_getbsize(fs)));
928
929 if (lfs_sb_getversion(fs) == 1) {
930 for (ifp_v1 = (IFILE_V1 *)bp->b_data; i < max; ++ifp_v1) {
931 ifp_v1->if_version = 1;
932 ifp_v1->if_daddr = LFS_UNUSED_DADDR;
933 ifp_v1->if_nextfree = ++i;
934 }
935 ifp_v1--;
936 ifp_v1->if_nextfree = oldlast;
937 } else {
938 for (ifp = (IFILE *)bp->b_data; i < max; ++ifp) {
939 ifp->if_version = 1;
940 ifp->if_daddr = LFS_UNUSED_DADDR;
941 ifp->if_nextfree = ++i;
942 }
943 ifp--;
944 ifp->if_nextfree = oldlast;
945 }
946 LFS_PUT_TAILFREE(fs, cip, cbp, max - 1);
947
948 LFS_BWRITE_LOG(bp);
949
950 #ifdef IN_FSCK_LFS
951 reset_maxino(((ip->i_ffs1_size >> lfs_sb_getbshift(fs))
952 - lfs_sb_getsegtabsz(fs)
953 - lfs_sb_getcleansz(fs)) * lfs_sb_getifpb(fs));
954 #endif
955 return 0;
956 }
957
958 /*
959  * Allocate a block, and do inode and filesystem block accounting for it
960  * and for any indirect blocks that may need to be created in order for
961 * this block to be created.
962 *
963 * Blocks which have never been accounted for (i.e., which "do not exist")
964 * have disk address 0, which is translated by ulfs_bmap to the special value
965 * UNASSIGNED == -1, as in the historical ULFS.
966 *
967 * Blocks which have been accounted for but which have not yet been written
968 * to disk are given the new special disk address UNWRITTEN == -2, so that
969 * they can be differentiated from completely new blocks.
970 */
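/*
 * A typical caller (cf. extend_ifile() above) does lfs_balloc(vp, off,
 * lfs_sb_getbsize(fs), &bp), fills in bp->b_data, and then writes the
 * buffer with VOP_BWRITE() or LFS_BWRITE_LOG().
 */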
971 int
972 lfs_balloc(struct uvnode *vp, off_t startoffset, int iosize, struct ubuf **bpp)
973 {
974 int offset;
975 daddr_t daddr, idaddr;
976 struct ubuf *ibp, *bp;
977 struct inode *ip;
978 struct lfs *fs;
979 struct indir indirs[ULFS_NIADDR+2], *idp;
980 daddr_t lbn, lastblock;
981 int bcount;
982 int error, frags, i, nsize, osize, num;
983
984 ip = VTOI(vp);
985 fs = ip->i_lfs;
986 offset = lfs_blkoff(fs, startoffset);
987 lbn = lfs_lblkno(fs, startoffset);
988
989 /*
990 * Three cases: it's a block beyond the end of file, it's a block in
991 * the file that may or may not have been assigned a disk address or
992 * we're writing an entire block.
993 *
994 * Note, if the daddr is UNWRITTEN, the block already exists in
995 * the cache (it was read or written earlier). If so, make sure
996 * we don't count it as a new block or zero out its contents. If
997 * it did not, make sure we allocate any necessary indirect
998 * blocks.
999 *
1000 * If we are writing a block beyond the end of the file, we need to
1001 * check if the old last block was a fragment. If it was, we need
1002 * to rewrite it.
1003 */
1004
1005 if (bpp)
1006 *bpp = NULL;
1007
1008 /* Check for block beyond end of file and fragment extension needed. */
1009 lastblock = lfs_lblkno(fs, ip->i_ffs1_size);
1010 if (lastblock < ULFS_NDADDR && lastblock < lbn) {
1011 osize = lfs_blksize(fs, ip, lastblock);
1012 if (osize < lfs_sb_getbsize(fs) && osize > 0) {
1013 if ((error = lfs_fragextend(vp, osize, lfs_sb_getbsize(fs),
1014 lastblock,
1015 (bpp ? &bp : NULL))))
1016 return (error);
1017 ip->i_ffs1_size = (lastblock + 1) * lfs_sb_getbsize(fs);
1018 ip->i_flag |= IN_CHANGE | IN_UPDATE;
1019 if (bpp)
1020 (void) VOP_BWRITE(bp);
1021 }
1022 }
1023
1024 /*
1025 * If the block we are writing is a direct block, it's the last
1026 * block in the file, and offset + iosize is less than a full
1027 * block, we can write one or more fragments. There are two cases:
1028 	 * either the block is brand new and we should allocate it with the
1029 	 * correct size, or it already exists and contains some fragments
1030 	 * that we may need to extend.
1031 */
1032 if (lbn < ULFS_NDADDR && lfs_lblkno(fs, ip->i_ffs1_size) <= lbn) {
1033 osize = lfs_blksize(fs, ip, lbn);
1034 nsize = lfs_fragroundup(fs, offset + iosize);
1035 if (lfs_lblktosize(fs, lbn) >= ip->i_ffs1_size) {
1036 /* Brand new block or fragment */
1037 frags = lfs_numfrags(fs, nsize);
1038 if (bpp) {
1039 *bpp = bp = getblk(vp, lbn, nsize);
1040 bp->b_blkno = UNWRITTEN;
1041 }
1042 ip->i_lfs_effnblks += frags;
1043 lfs_sb_subbfree(fs, frags);
1044 ip->i_ffs1_db[lbn] = UNWRITTEN;
1045 } else {
1046 if (nsize <= osize) {
1047 /* No need to extend */
1048 if (bpp && (error = bread(vp, lbn, osize,
1049 0, &bp)))
1050 return error;
1051 } else {
1052 /* Extend existing block */
1053 if ((error =
1054 lfs_fragextend(vp, osize, nsize, lbn,
1055 (bpp ? &bp : NULL))))
1056 return error;
1057 }
1058 if (bpp)
1059 *bpp = bp;
1060 }
1061 return 0;
1062 }
1063
1064 error = ulfs_bmaparray(fs, vp, lbn, &daddr, &indirs[0], &num);
1065 if (error)
1066 return (error);
1067
1068 daddr = (daddr_t)((int32_t)daddr); /* XXX ondisk32 */
1069
1070 /*
1071 * Do byte accounting all at once, so we can gracefully fail *before*
1072 * we start assigning blocks.
1073 */
1074 frags = LFS_FSBTODB(fs, 1); /* frags = VFSTOULFS(vp->v_mount)->um_seqinc; */
1075 bcount = 0;
1076 if (daddr == UNASSIGNED) {
1077 bcount = frags;
1078 }
1079 for (i = 1; i < num; ++i) {
1080 if (!indirs[i].in_exists) {
1081 bcount += frags;
1082 }
1083 }
1084 lfs_sb_subbfree(fs, bcount);
1085 ip->i_lfs_effnblks += bcount;
1086
1087 if (daddr == UNASSIGNED) {
1088 if (num > 0 && ip->i_ffs1_ib[indirs[0].in_off] == 0) {
1089 ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
1090 }
1091
1092 /*
1093 * Create new indirect blocks if necessary
1094 */
1095 if (num > 1) {
1096 idaddr = ip->i_ffs1_ib[indirs[0].in_off];
1097 for (i = 1; i < num; ++i) {
1098 ibp = getblk(vp, indirs[i].in_lbn,
1099 lfs_sb_getbsize(fs));
1100 if (!indirs[i].in_exists) {
1101 memset(ibp->b_data, 0, ibp->b_bufsize);
1102 ibp->b_blkno = UNWRITTEN;
1103 } else if (!(ibp->b_flags & (B_DELWRI | B_DONE))) {
1104 ibp->b_blkno = LFS_FSBTODB(fs, idaddr);
1105 ibp->b_flags |= B_READ;
1106 VOP_STRATEGY(ibp);
1107 }
1108 /*
1109 * This block exists, but the next one may not.
1110 * If that is the case mark it UNWRITTEN to
1111 * keep the accounting straight.
1112 */
1113 /* XXX ondisk32 */
1114 if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
1115 ((int32_t *)ibp->b_data)[indirs[i].in_off] =
1116 UNWRITTEN;
1117 /* XXX ondisk32 */
1118 idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
1119 if ((error = VOP_BWRITE(ibp)))
1120 return error;
1121 }
1122 }
1123 }
1124
1125
1126 /*
1127 * Get the existing block from the cache, if requested.
1128 */
1129 if (bpp)
1130 *bpp = bp = getblk(vp, lbn, lfs_blksize(fs, ip, lbn));
1131
1132 /*
1133 * The block we are writing may be a brand new block
1134 * in which case we need to do accounting.
1135 *
1136 * We can tell a truly new block because ulfs_bmaparray will say
1137 * it is UNASSIGNED. Once we allocate it we will assign it the
1138 * disk address UNWRITTEN.
1139 */
1140 if (daddr == UNASSIGNED) {
1141 if (bpp) {
1142 /* Note the new address */
1143 bp->b_blkno = UNWRITTEN;
1144 }
1145
1146 switch (num) {
1147 case 0:
1148 ip->i_ffs1_db[lbn] = UNWRITTEN;
1149 break;
1150 case 1:
1151 ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
1152 break;
1153 default:
1154 idp = &indirs[num - 1];
1155 if (bread(vp, idp->in_lbn, lfs_sb_getbsize(fs), 0, &ibp))
1156 panic("lfs_balloc: bread bno %lld",
1157 (long long)idp->in_lbn);
1158 /* XXX ondisk32 */
1159 ((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
1160 VOP_BWRITE(ibp);
1161 }
1162 } else if (bpp && !(bp->b_flags & (B_DONE|B_DELWRI))) {
1163 /*
1164 * Not a brand new block, also not in the cache;
1165 * read it in from disk.
1166 */
1167 if (iosize == lfs_sb_getbsize(fs))
1168 /* Optimization: I/O is unnecessary. */
1169 bp->b_blkno = daddr;
1170 else {
1171 /*
1172 * We need to read the block to preserve the
1173 * existing bytes.
1174 */
1175 bp->b_blkno = daddr;
1176 bp->b_flags |= B_READ;
1177 VOP_STRATEGY(bp);
1178 return 0;
1179 }
1180 }
1181
1182 return (0);
1183 }
1184
1185 static int
1186 lfs_fragextend(struct uvnode *vp, int osize, int nsize, daddr_t lbn,
1187 struct ubuf **bpp)
1188 {
1189 struct inode *ip;
1190 struct lfs *fs;
1191 int frags;
1192 int error;
1193
1194 ip = VTOI(vp);
1195 fs = ip->i_lfs;
1196 frags = (long)lfs_numfrags(fs, nsize - osize);
1197 error = 0;
1198
1199 	/*
1200 	 * If we are not asked to actually return the block (bpp == NULL),
1201 	 * all we need to do is account for the space; the caller does not
1202 	 * need the old contents, so don't bother to read the block in
1203 	 * that case.
1204 	 */
1205 if (bpp && (error = bread(vp, lbn, osize, 0, bpp))) {
1206 brelse(*bpp, 0);
1207 goto out;
1208 }
1209
1210 lfs_sb_subbfree(fs, frags);
1211 ip->i_lfs_effnblks += frags;
1212 ip->i_flag |= IN_CHANGE | IN_UPDATE;
1213
1214 if (bpp) {
1215 (*bpp)->b_data = erealloc((*bpp)->b_data, nsize);
1216 (void)memset((*bpp)->b_data + osize, 0, nsize - osize);
1217 }
1218
1219 out:
1220 return (error);
1221 }
1222