/* $NetBSD: lfs.c,v 1.38 2013/06/06 00:54:49 dholland Exp $ */
/*-
 * Copyright (c) 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_bmap.c	8.8 (Berkeley) 8/11/95
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/buf.h>
#include <sys/mount.h>

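/*
 * Map the kernel's struct vnode onto our userland uvnode, and keep the
 * kernel <sys/vnode.h> from being pulled in by the ulfs headers below.
 */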
#define vnode uvnode
#define _SYS_VNODE_H_	/* XXX */
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/lfs.h>
#undef vnode

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <util.h>

#include "bufcache.h"
#include "vnode.h"
#include "lfs_user.h"
#include "segwrite.h"
#include "kernelops.h"

#define panic call_panic

extern u_int32_t cksum(void *, size_t);
extern u_int32_t lfs_sb_cksum(struct dlfs *);
extern void pwarn(const char *, ...);

extern struct uvnodelst vnodelist;
extern struct uvnodelst getvnodelist[VNODE_HASH_MAX];
extern int nvnodes;

long dev_bsize = DEV_BSIZE;

static int
lfs_fragextend(struct uvnode *, int, int, daddr_t, struct ubuf **);

int fsdirty = 0;
void (*panic_func)(int, const char *, va_list) = my_vpanic;

/*
 * LFS buffer and uvnode operations
 */

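/*
 * Synchronous buffer I/O: carry out the read or write described by bp
 * through the kernelops pread/pwrite hooks on the vnode's file descriptor.
 */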
int
lfs_vop_strategy(struct ubuf * bp)
{
	int count;

	if (bp->b_flags & B_READ) {
		count = kops.ko_pread(bp->b_vp->v_fd, bp->b_data, bp->b_bcount,
		    bp->b_blkno * dev_bsize);
		if (count == bp->b_bcount)
			bp->b_flags |= B_DONE;
	} else {
		count = kops.ko_pwrite(bp->b_vp->v_fd, bp->b_data, bp->b_bcount,
		    bp->b_blkno * dev_bsize);
		if (count == 0) {
			perror("pwrite");
			return -1;
		}
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}
	return 0;
}

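/*
 * Delayed write: charge the block against lfs_avail if it was not already
 * a delayed write, mark the buffer dirty and locked, and release it.
 */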
int
lfs_vop_bwrite(struct ubuf * bp)
{
	struct lfs *fs;

	fs = bp->b_vp->v_fs;
	if (!(bp->b_flags & B_DELWRI)) {
		fs->lfs_avail -= btofsb(fs, bp->b_bcount);
	}
	bp->b_flags |= B_DELWRI | B_LOCKED;
	reassignbuf(bp, bp->b_vp);
	brelse(bp, 0);
	return 0;
}

/*
 * ulfs_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
int
ulfs_bmaparray(struct lfs * fs, struct uvnode * vp, daddr_t bn, daddr_t * bnp, struct indir * ap, int *nump)
{
	struct inode *ip;
	struct ubuf *bp;
	struct indir a[ULFS_NIADDR + 1], *xap;
	daddr_t daddr;
	daddr_t metalbn;
	int error, num;

	ip = VTOI(vp);

	if (bn >= 0 && bn < ULFS_NDADDR) {
		if (nump != NULL)
			*nump = 0;
		*bnp = fsbtodb(fs, ip->i_ffs1_db[bn]);
		if (*bnp == 0)
			*bnp = -1;
		return (0);
	}
	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	if ((error = ulfs_getlbns(fs, vp, bn, xap, nump)) != 0)
		return (error);

	num = *nump;

	/* Get disk address out of indirect block array */
	daddr = ip->i_ffs1_ib[xap->in_off];

	for (bp = NULL, ++xap; --num; ++xap) {
		/* Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it. */

		metalbn = xap->in_lbn;
		if ((daddr == 0 && !incore(vp, metalbn)) || metalbn == bn)
			break;
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			brelse(bp, 0);

		xap->in_exists = 1;
		bp = getblk(vp, metalbn, fs->lfs_bsize);

		if (!(bp->b_flags & (B_DONE | B_DELWRI))) {
			bp->b_blkno = fsbtodb(fs, daddr);
			bp->b_flags |= B_READ;
			VOP_STRATEGY(bp);
		}
		daddr = ((ulfs_daddr_t *) bp->b_data)[xap->in_off];
	}
	if (bp)
		brelse(bp, 0);

	daddr = fsbtodb(fs, (ulfs_daddr_t) daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}

/*
 * Create an array of logical block number/offset pairs which represent the
 * path of indirect blocks required to access a data block.  The first "pair"
 * contains the logical block number of the appropriate single, double or
 * triple indirect block and the offset into the inode indirect block array.
 * Note, the logical block number of the inode single/double/triple indirect
 * block appears twice in the array, once with the offset into the i_ffs1_ib and
 * once with the offset into the page itself.
 */
int
ulfs_getlbns(struct lfs * fs, struct uvnode * vp, daddr_t bn, struct indir * ap, int *nump)
{
	daddr_t metalbn, realbn;
	int64_t blockcnt;
	int lbc;
	int i, numlevels, off;
	int lognindir, indir;

	metalbn = 0;	/* XXXGCC -Wuninitialized [sh3] */

	if (nump)
		*nump = 0;
	numlevels = 0;
	realbn = bn;
	if (bn < 0)
		bn = -bn;

	lognindir = -1;
	for (indir = fs->lfs_nindir; indir; indir >>= 1)
		++lognindir;

	/* Determine the number of levels of indirection. After this loop is
	 * done, blockcnt indicates the number of data blocks possible at the
	 * given level of indirection, and ULFS_NIADDR - i is the number of levels
	 * of indirection needed to locate the requested block. */

	bn -= ULFS_NDADDR;
	for (lbc = 0, i = ULFS_NIADDR;; i--, bn -= blockcnt) {
		if (i == 0)
			return (EFBIG);

		lbc += lognindir;
		blockcnt = (int64_t) 1 << lbc;

		if (bn < blockcnt)
			break;
	}

	/* Calculate the address of the first meta-block. */
	metalbn = -((realbn >= 0 ? realbn : -realbn) - bn + ULFS_NIADDR - i);

	/* At each iteration, off is the offset into the bap array which is an
	 * array of disk addresses at the current level of indirection. The
	 * logical block number and the offset in that block are stored into
	 * the argument array. */
	ap->in_lbn = metalbn;
	ap->in_off = off = ULFS_NIADDR - i;
	ap->in_exists = 0;
	ap++;
	for (++numlevels; i <= ULFS_NIADDR; i++) {
		/* If searching for a meta-data block, quit when found. */
		if (metalbn == realbn)
			break;

		lbc -= lognindir;
		blockcnt = (int64_t) 1 << lbc;
		off = (bn >> lbc) & (fs->lfs_nindir - 1);

		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;
		ap->in_exists = 0;
		++ap;

		metalbn -= -1 + (off << lbc);
	}
	if (nump)
		*nump = numlevels;
	return (0);
}

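/* Translate a single logical block number on vp into a disk address. */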
int
lfs_vop_bmap(struct uvnode * vp, daddr_t lbn, daddr_t * daddrp)
{
	return ulfs_bmaparray(vp->v_fs, vp, lbn, daddrp, NULL, NULL);
}

/* Search a block for a specific dinode. */
struct ulfs1_dinode *
lfs_ifind(struct lfs * fs, ino_t ino, struct ubuf * bp)
{
	struct ulfs1_dinode *dip = (struct ulfs1_dinode *) bp->b_data;
	struct ulfs1_dinode *ldip, *fin;

	fin = dip + INOPB(fs);

	/*
	 * Read the inode block backwards, since later versions of the
	 * inode will supersede earlier ones.  Though it is unlikely, it is
	 * possible that the same inode will appear more than once in the
	 * same inode block.
	 */
	for (ldip = fin - 1; ldip >= dip; --ldip)
		if (ldip->di_inumber == ino)
			return (ldip);
	return NULL;
}

/*
 * lfs_raw_vget makes us a new vnode from the inode at the given disk address.
 * XXX it currently loses atime information.
 */
struct uvnode *
lfs_raw_vget(struct lfs * fs, ino_t ino, int fd, ulfs_daddr_t daddr)
{
	struct uvnode *vp;
	struct inode *ip;
	struct ulfs1_dinode *dip;
	struct ubuf *bp;
	int i, hash;

	vp = ecalloc(1, sizeof(*vp));
	vp->v_fd = fd;
	vp->v_fs = fs;
	vp->v_usecount = 0;
	vp->v_strategy_op = lfs_vop_strategy;
	vp->v_bwrite_op = lfs_vop_bwrite;
	vp->v_bmap_op = lfs_vop_bmap;
	LIST_INIT(&vp->v_cleanblkhd);
	LIST_INIT(&vp->v_dirtyblkhd);

	ip = ecalloc(1, sizeof(*ip));

	ip->i_din.ffs1_din = ecalloc(1, sizeof(*ip->i_din.ffs1_din));

	/* Initialize the inode -- from lfs_vcreate. */
	ip->inode_ext.lfs = ecalloc(1, sizeof(*ip->inode_ext.lfs));
	vp->v_data = ip;
	/* ip->i_vnode = vp; */
	ip->i_number = ino;
	ip->i_lockf = 0;
	ip->i_lfs_effnblks = 0;
	ip->i_flag = 0;

	/* Load inode block and find inode */
	if (daddr > 0) {
		bread(fs->lfs_devvp, fsbtodb(fs, daddr), fs->lfs_ibsize,
		    NULL, 0, &bp);
		bp->b_flags |= B_AGE;
		dip = lfs_ifind(fs, ino, bp);
		if (dip == NULL) {
			brelse(bp, 0);
			free(ip);
			free(vp);
			return NULL;
		}
		memcpy(ip->i_din.ffs1_din, dip, sizeof(*dip));
		brelse(bp, 0);
	}
	ip->i_number = ino;
	/* ip->i_devvp = fs->lfs_devvp; */
	ip->i_lfs = fs;

	ip->i_lfs_effnblks = ip->i_ffs1_blocks;
	ip->i_lfs_osize = ip->i_ffs1_size;
#if 0
	if (fs->lfs_version > 1) {
		ip->i_ffs1_atime = ts.tv_sec;
		ip->i_ffs1_atimensec = ts.tv_nsec;
	}
#endif

	memset(ip->i_lfs_fragsize, 0, ULFS_NDADDR * sizeof(*ip->i_lfs_fragsize));
	for (i = 0; i < ULFS_NDADDR; i++)
		if (ip->i_ffs1_db[i] != 0)
			ip->i_lfs_fragsize[i] = blksize(fs, ip, i);

	++nvnodes;
	hash = ((int)(intptr_t)fs + ino) & (VNODE_HASH_MAX - 1);
	LIST_INSERT_HEAD(&getvnodelist[hash], vp, v_getvnodes);
	LIST_INSERT_HEAD(&vnodelist, vp, v_mntvnodes);

	return vp;
}

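/*
 * Look an inode up through its Ifile entry and load it with lfs_raw_vget.
 * Returns NULL if the inode has no valid on-disk address.
 */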
static struct uvnode *
lfs_vget(void *vfs, ino_t ino)
{
	struct lfs *fs = (struct lfs *)vfs;
	ulfs_daddr_t daddr;
	struct ubuf *bp;
	IFILE *ifp;

	LFS_IENTRY(ifp, fs, ino, bp);
	daddr = ifp->if_daddr;
	brelse(bp, 0);
	if (daddr <= 0 || dtosn(fs, daddr) >= fs->lfs_nseg)
		return NULL;
	return lfs_raw_vget(fs, ino, fs->lfs_ivnode->v_fd, daddr);
}

/* Check superblock magic number and checksum */
static int
check_sb(struct lfs *fs)
{
	u_int32_t checksum;

	if (fs->lfs_magic != LFS_MAGIC) {
		printf("Superblock magic number (0x%lx) does not match "
		    "expected 0x%lx\n", (unsigned long) fs->lfs_magic,
		    (unsigned long) LFS_MAGIC);
		return 1;
	}
	/* checksum */
	checksum = lfs_sb_cksum(&(fs->lfs_dlfs));
	if (fs->lfs_cksum != checksum) {
		printf("Superblock checksum (%lx) does not match computed checksum (%lx)\n",
		    (unsigned long) fs->lfs_cksum, (unsigned long) checksum);
		return 1;
	}
	return 0;
}

/* Initialize LFS library; load superblocks and choose which to use. */
struct lfs *
lfs_init(int devfd, daddr_t sblkno, daddr_t idaddr, int dummy_read, int debug)
{
	struct uvnode *devvp;
	struct ubuf *bp;
	int tryalt;
	struct lfs *fs, *altfs;
	int error;

	vfs_init();

	devvp = ecalloc(1, sizeof(*devvp));
	devvp->v_fs = NULL;
	devvp->v_fd = devfd;
	devvp->v_strategy_op = raw_vop_strategy;
	devvp->v_bwrite_op = raw_vop_bwrite;
	devvp->v_bmap_op = raw_vop_bmap;
	LIST_INIT(&devvp->v_cleanblkhd);
	LIST_INIT(&devvp->v_dirtyblkhd);

	tryalt = 0;
	if (dummy_read) {
		if (sblkno == 0)
			sblkno = LFS_LABELPAD / dev_bsize;
		fs = ecalloc(1, sizeof(*fs));
		fs->lfs_devvp = devvp;
	} else {
		if (sblkno == 0) {
			sblkno = LFS_LABELPAD / dev_bsize;
			tryalt = 1;
		} else if (debug) {
			printf("-b flag given, not attempting to verify checkpoint\n");
		}

		dev_bsize = DEV_BSIZE;

		error = bread(devvp, sblkno, LFS_SBPAD, NOCRED, 0, &bp);
		fs = ecalloc(1, sizeof(*fs));
		fs->lfs_dlfs = *((struct dlfs *) bp->b_data);
		fs->lfs_devvp = devvp;
		bp->b_flags |= B_INVAL;
		brelse(bp, 0);

		dev_bsize = fs->lfs_fsize >> fs->lfs_fsbtodb;

		if (tryalt) {
			error = bread(devvp, fsbtodb(fs, fs->lfs_sboffs[1]),
			    LFS_SBPAD, NOCRED, 0, &bp);
			altfs = ecalloc(1, sizeof(*altfs));
			altfs->lfs_dlfs = *((struct dlfs *) bp->b_data);
			altfs->lfs_devvp = devvp;
			bp->b_flags |= B_INVAL;
			brelse(bp, 0);

			if (check_sb(fs) || fs->lfs_idaddr <= 0) {
				if (debug)
					printf("Primary superblock is no good, using first alternate\n");
				free(fs);
				fs = altfs;
			} else {
				/* If both superblocks check out, try verification */
				if (check_sb(altfs)) {
					if (debug)
						printf("First alternate superblock is no good, using primary\n");
					free(altfs);
				} else {
					if (lfs_verify(fs, altfs, devvp, debug) == fs) {
						free(altfs);
					} else {
						free(fs);
						fs = altfs;
					}
				}
			}
		}
		if (check_sb(fs)) {
			free(fs);
			return NULL;
		}
	}

	/* Compatibility */
	if (fs->lfs_version < 2) {
		fs->lfs_sumsize = LFS_V1_SUMMARY_SIZE;
		fs->lfs_ibsize = fs->lfs_bsize;
		fs->lfs_start = fs->lfs_sboffs[0];
		fs->lfs_tstamp = fs->lfs_otstamp;
		fs->lfs_fsbtodb = 0;
	}

	if (!dummy_read) {
		fs->lfs_suflags = emalloc(2 * sizeof(u_int32_t *));
		fs->lfs_suflags[0] = emalloc(fs->lfs_nseg * sizeof(u_int32_t));
		fs->lfs_suflags[1] = emalloc(fs->lfs_nseg * sizeof(u_int32_t));
	}

	if (idaddr == 0)
		idaddr = fs->lfs_idaddr;
	else
		fs->lfs_idaddr = idaddr;
	/* NB: If dummy_read!=0, idaddr==0 here so we get a fake inode. */
	fs->lfs_ivnode = lfs_raw_vget(fs,
	    (dummy_read ? LFS_IFILE_INUM : fs->lfs_ifile), devvp->v_fd,
	    idaddr);
	if (fs->lfs_ivnode == NULL)
		return NULL;

	register_vget((void *)fs, lfs_vget);

	return fs;
}

/*
 * Check partial segment validity between fs->lfs_offset and the given goal.
 *
 * If goal == 0, just keep on going until the segments stop making sense,
 * and return the address of the last valid partial segment.
 *
 * If goal != 0, return the address of the first partial segment that failed,
 * or "goal" if we reached it without failure (the partial segment *at* goal
 * need not be valid).
 */
ulfs_daddr_t
try_verify(struct lfs *osb, struct uvnode *devvp, ulfs_daddr_t goal, int debug)
{
	ulfs_daddr_t daddr, odaddr;
	SEGSUM *sp;
	int i, bc, hitclean;
	struct ubuf *bp;
	ulfs_daddr_t nodirop_daddr;
	u_int64_t serial;

	bc = 0;
	hitclean = 0;
	odaddr = -1;
	daddr = osb->lfs_offset;
	nodirop_daddr = daddr;
	serial = osb->lfs_serial;
	while (daddr != goal) {
		/*
		 * Don't mistakenly read a superblock, if there is one here.
		 */
		if (sntod(osb, dtosn(osb, daddr)) == daddr) {
			if (daddr == osb->lfs_start)
				daddr += btofsb(osb, LFS_LABELPAD);
			for (i = 0; i < LFS_MAXNUMSB; i++) {
				if (osb->lfs_sboffs[i] < daddr)
					break;
				if (osb->lfs_sboffs[i] == daddr)
					daddr += btofsb(osb, LFS_SBPAD);
			}
		}

		/* Read in summary block */
		bread(devvp, fsbtodb(osb, daddr), osb->lfs_sumsize,
		    NULL, 0, &bp);
		sp = (SEGSUM *)bp->b_data;

		/*
		 * Check for a valid segment summary belonging to our fs.
		 */
		if (sp->ss_magic != SS_MAGIC ||
		    sp->ss_ident != osb->lfs_ident ||
		    sp->ss_serial < serial ||	/* XXX strengthen this */
		    sp->ss_sumsum != cksum(&sp->ss_datasum, osb->lfs_sumsize -
			sizeof(sp->ss_sumsum))) {
			brelse(bp, 0);
			if (debug) {
				if (sp->ss_magic != SS_MAGIC)
					pwarn("pseg at 0x%x: "
					    "wrong magic number\n",
					    (int)daddr);
				else if (sp->ss_ident != osb->lfs_ident)
					pwarn("pseg at 0x%x: "
					    "expected ident %llx, got %llx\n",
					    (int)daddr,
					    (long long)osb->lfs_ident,
					    (long long)sp->ss_ident);
				else if (sp->ss_serial < serial)
					pwarn("pseg at 0x%x: "
					    "serial %d < %d\n", (int)daddr,
					    (int)sp->ss_serial, (int)serial);
				else
					pwarn("pseg at 0x%x: "
					    "summary checksum wrong\n",
					    (int)daddr);
			}
			break;
		}
		if (debug && sp->ss_serial != serial)
			pwarn("warning, serial=%d ss_serial=%d\n",
			    (int)serial, (int)sp->ss_serial);
		++serial;
		bc = check_summary(osb, sp, daddr, debug, devvp, NULL);
		if (bc == 0) {
			brelse(bp, 0);
			break;
		}
		if (debug)
			pwarn("summary good: 0x%x/%d\n", (int)daddr,
			    (int)sp->ss_serial);
		assert (bc > 0);
		odaddr = daddr;
		daddr += btofsb(osb, osb->lfs_sumsize + bc);
		if (dtosn(osb, odaddr) != dtosn(osb, daddr) ||
		    dtosn(osb, daddr) != dtosn(osb, daddr +
			btofsb(osb, osb->lfs_sumsize + osb->lfs_bsize) - 1)) {
			daddr = sp->ss_next;
		}

		/*
		 * Check for the beginning and ending of a sequence of
		 * dirops.  Writes from the cleaner never involve new
		 * information, and are always checkpoints; so don't try
		 * to roll forward through them.  Likewise, psegs written
		 * by a previous roll-forward attempt are not interesting.
		 */
		if (sp->ss_flags & (SS_CLEAN | SS_RFW))
			hitclean = 1;
		if (hitclean == 0 && (sp->ss_flags & SS_CONT) == 0)
			nodirop_daddr = daddr;

		brelse(bp, 0);
	}

	if (goal == 0)
		return nodirop_daddr;
	else
		return daddr;
}

/* Use try_verify to check whether the newer superblock is valid. */
struct lfs *
lfs_verify(struct lfs *sb0, struct lfs *sb1, struct uvnode *devvp, int debug)
{
	ulfs_daddr_t daddr;
	struct lfs *osb, *nsb;

	/*
	 * Verify the checkpoint of the newer superblock,
	 * if the timestamp/serial number of the two superblocks is
	 * different.
	 */

	osb = NULL;
	if (debug)
		pwarn("sb0 %lld, sb1 %lld",
		    (long long) sb0->lfs_serial,
		    (long long) sb1->lfs_serial);

	if ((sb0->lfs_version == 1 &&
	     sb0->lfs_otstamp != sb1->lfs_otstamp) ||
	    (sb0->lfs_version > 1 &&
	     sb0->lfs_serial != sb1->lfs_serial)) {
		if (sb0->lfs_version == 1) {
			if (sb0->lfs_otstamp > sb1->lfs_otstamp) {
				osb = sb1;
				nsb = sb0;
			} else {
				osb = sb0;
				nsb = sb1;
			}
		} else {
			if (sb0->lfs_serial > sb1->lfs_serial) {
				osb = sb1;
				nsb = sb0;
			} else {
				osb = sb0;
				nsb = sb1;
			}
		}
		if (debug) {
			printf("Attempting to verify newer checkpoint...");
			fflush(stdout);
		}
		daddr = try_verify(osb, devvp, nsb->lfs_offset, debug);

		if (debug)
			printf("done.\n");
		if (daddr == nsb->lfs_offset) {
			pwarn("** Newer checkpoint verified, recovered %lld seconds of data\n",
			    (long long) nsb->lfs_tstamp - (long long) osb->lfs_tstamp);
			sbdirty();
		} else {
			pwarn("** Newer checkpoint invalid, lost %lld seconds of data\n",
			    (long long) nsb->lfs_tstamp - (long long) osb->lfs_tstamp);
		}
		return (daddr == nsb->lfs_offset ? nsb : osb);
	}
	/* Nothing to check */
	return osb;
}

/* Verify a partial-segment summary; return the number of bytes on disk. */
int
check_summary(struct lfs *fs, SEGSUM *sp, ulfs_daddr_t pseg_addr, int debug,
    struct uvnode *devvp, void (func(ulfs_daddr_t, FINFO *)))
{
	FINFO *fp;
	int bc;			/* Bytes in partial segment */
	int nblocks;
	ulfs_daddr_t seg_addr, daddr;
	ulfs_daddr_t *dp, *idp;
	struct ubuf *bp;
	int i, j, k, datac, len;
	long sn;
	u_int32_t *datap;
	u_int32_t ccksum;

	sn = dtosn(fs, pseg_addr);
	seg_addr = sntod(fs, sn);

	/* We've already checked the sumsum, just do the data bounds and sum */

	/* Count the blocks. */
	nblocks = howmany(sp->ss_ninos, INOPB(fs));
	bc = nblocks << (fs->lfs_version > 1 ? fs->lfs_ffshift : fs->lfs_bshift);
	assert(bc >= 0);

	fp = (FINFO *) (sp + 1);
	for (i = 0; i < sp->ss_nfinfo; i++) {
		nblocks += fp->fi_nblocks;
		bc += fp->fi_lastlength + ((fp->fi_nblocks - 1)
		    << fs->lfs_bshift);
		assert(bc >= 0);
		fp = (FINFO *) (fp->fi_blocks + fp->fi_nblocks);
		if (((char *)fp) - (char *)sp > fs->lfs_sumsize)
			return 0;
	}
	datap = emalloc(nblocks * sizeof(*datap));
	datac = 0;

	dp = (ulfs_daddr_t *) sp;
	dp += fs->lfs_sumsize / sizeof(ulfs_daddr_t);
	dp--;

	idp = dp;
	daddr = pseg_addr + btofsb(fs, fs->lfs_sumsize);
	fp = (FINFO *) (sp + 1);
	for (i = 0, j = 0;
	     i < sp->ss_nfinfo || j < howmany(sp->ss_ninos, INOPB(fs)); i++) {
		if (i >= sp->ss_nfinfo && *idp != daddr) {
			pwarn("Not enough inode blocks in pseg at 0x%" PRIx32
			    ": found %d, wanted %d\n",
			    pseg_addr, j, howmany(sp->ss_ninos, INOPB(fs)));
			if (debug)
				pwarn("*idp=%x, daddr=%" PRIx32 "\n", *idp,
				    daddr);
			break;
		}
		while (j < howmany(sp->ss_ninos, INOPB(fs)) && *idp == daddr) {
			bread(devvp, fsbtodb(fs, daddr), fs->lfs_ibsize,
			    NOCRED, 0, &bp);
			datap[datac++] = ((u_int32_t *) (bp->b_data))[0];
			brelse(bp, 0);

			++j;
			daddr += btofsb(fs, fs->lfs_ibsize);
			--idp;
		}
		if (i < sp->ss_nfinfo) {
			if (func)
				func(daddr, fp);
			for (k = 0; k < fp->fi_nblocks; k++) {
				len = (k == fp->fi_nblocks - 1 ?
				    fp->fi_lastlength
				    : fs->lfs_bsize);
				bread(devvp, fsbtodb(fs, daddr), len,
				    NOCRED, 0, &bp);
				datap[datac++] = ((u_int32_t *) (bp->b_data))[0];
				brelse(bp, 0);
				daddr += btofsb(fs, len);
			}
			fp = (FINFO *) (fp->fi_blocks + fp->fi_nblocks);
		}
	}

	if (datac != nblocks) {
		pwarn("Partial segment at 0x%llx expected %d blocks counted %d\n",
		    (long long) pseg_addr, nblocks, datac);
	}
	ccksum = cksum(datap, nblocks * sizeof(u_int32_t));
	/* Check the data checksum */
	if (ccksum != sp->ss_datasum) {
		pwarn("Partial segment at 0x%" PRIx32 " data checksum"
		    " mismatch: given 0x%x, computed 0x%x\n",
		    pseg_addr, sp->ss_datasum, ccksum);
		free(datap);
		return 0;
	}
	free(datap);
	assert(bc >= 0);
	return bc;
}

/* print message and exit */
void
my_vpanic(int fatal, const char *fmt, va_list ap)
{
	(void) vprintf(fmt, ap);
	exit(8);
}

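/*
 * panic() is defined to this function above; forward the formatted
 * message to the registered panic handler.
 */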
void
call_panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	panic_func(1, fmt, ap);
	va_end(ap);
}

/* Allocate a new inode. */
struct uvnode *
lfs_valloc(struct lfs *fs, ino_t ino)
{
	struct ubuf *bp, *cbp;
	struct ifile *ifp;
	ino_t new_ino;
	int error;
	int new_gen;
	CLEANERINFO *cip;

	/* Get the head of the freelist. */
	LFS_GET_HEADFREE(fs, cip, cbp, &new_ino);

	/*
	 * Remove the inode from the free list and write the new start
	 * of the free list into the superblock.
	 */
	LFS_IENTRY(ifp, fs, new_ino, bp);
	if (ifp->if_daddr != LFS_UNUSED_DADDR)
		panic("lfs_valloc: inuse inode %d on the free list", new_ino);
	LFS_PUT_HEADFREE(fs, cip, cbp, ifp->if_nextfree);

	new_gen = ifp->if_version;	/* version was updated by vfree */
	brelse(bp, 0);

	/* Extend IFILE so that the next lfs_valloc will succeed. */
	if (fs->lfs_freehd == LFS_UNUSED_INUM) {
		if ((error = extend_ifile(fs)) != 0) {
			LFS_PUT_HEADFREE(fs, cip, cbp, new_ino);
			return NULL;
		}
	}

	/* Set superblock modified bit and increment file count. */
	sbdirty();
	++fs->lfs_nfiles;

	return lfs_raw_vget(fs, ino, fs->lfs_devvp->v_fd, 0x0);
}

#ifdef IN_FSCK_LFS
void reset_maxino(ino_t);
#endif

/*
 * Add a new block to the Ifile, to accommodate future file creations.
 */
int
extend_ifile(struct lfs *fs)
{
	struct uvnode *vp;
	struct inode *ip;
	IFILE *ifp;
	IFILE_V1 *ifp_v1;
	struct ubuf *bp, *cbp;
	daddr_t i, blkno, max;
	ino_t oldlast;
	CLEANERINFO *cip;

	vp = fs->lfs_ivnode;
	ip = VTOI(vp);
	blkno = lblkno(fs, ip->i_ffs1_size);

	lfs_balloc(vp, ip->i_ffs1_size, fs->lfs_bsize, &bp);
	ip->i_ffs1_size += fs->lfs_bsize;
	ip->i_flag |= IN_MODIFIED;

	i = (blkno - fs->lfs_segtabsz - fs->lfs_cleansz) *
	    fs->lfs_ifpb;
	LFS_GET_HEADFREE(fs, cip, cbp, &oldlast);
	LFS_PUT_HEADFREE(fs, cip, cbp, i);
	max = i + fs->lfs_ifpb;
	fs->lfs_bfree -= btofsb(fs, fs->lfs_bsize);

	if (fs->lfs_version == 1) {
		for (ifp_v1 = (IFILE_V1 *)bp->b_data; i < max; ++ifp_v1) {
			ifp_v1->if_version = 1;
			ifp_v1->if_daddr = LFS_UNUSED_DADDR;
			ifp_v1->if_nextfree = ++i;
		}
		ifp_v1--;
		ifp_v1->if_nextfree = oldlast;
	} else {
		for (ifp = (IFILE *)bp->b_data; i < max; ++ifp) {
			ifp->if_version = 1;
			ifp->if_daddr = LFS_UNUSED_DADDR;
			ifp->if_nextfree = ++i;
		}
		ifp--;
		ifp->if_nextfree = oldlast;
	}
	LFS_PUT_TAILFREE(fs, cip, cbp, max - 1);

	LFS_BWRITE_LOG(bp);

#ifdef IN_FSCK_LFS
	reset_maxino(((ip->i_ffs1_size >> fs->lfs_bshift) - fs->lfs_segtabsz -
		fs->lfs_cleansz) * fs->lfs_ifpb);
#endif
	return 0;
}

/*
 * Allocate a block, and do inode and filesystem block accounting for it
 * and for any indirect blocks that may need to be created in order for
 * this block to be created.
 *
 * Blocks which have never been accounted for (i.e., which "do not exist")
 * have disk address 0, which is translated by ulfs_bmap to the special value
 * UNASSIGNED == -1, as in the historical ULFS.
 *
 * Blocks which have been accounted for but which have not yet been written
 * to disk are given the new special disk address UNWRITTEN == -2, so that
 * they can be differentiated from completely new blocks.
 */
int
lfs_balloc(struct uvnode *vp, off_t startoffset, int iosize, struct ubuf **bpp)
{
	int offset;
	daddr_t daddr, idaddr;
	struct ubuf *ibp, *bp;
	struct inode *ip;
	struct lfs *fs;
	struct indir indirs[ULFS_NIADDR+2], *idp;
	daddr_t lbn, lastblock;
	int bcount;
	int error, frags, i, nsize, osize, num;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	offset = blkoff(fs, startoffset);
	lbn = lblkno(fs, startoffset);

	/*
	 * Three cases: it's a block beyond the end of file, it's a block in
	 * the file that may or may not have been assigned a disk address,
	 * or we're writing an entire block.
	 *
	 * Note, if the daddr is UNWRITTEN, the block already exists in
	 * the cache (it was read or written earlier). If so, make sure
	 * we don't count it as a new block or zero out its contents. If
	 * it did not, make sure we allocate any necessary indirect
	 * blocks.
	 *
	 * If we are writing a block beyond the end of the file, we need to
	 * check if the old last block was a fragment. If it was, we need
	 * to rewrite it.
	 */

	if (bpp)
		*bpp = NULL;

	/* Check for block beyond end of file and fragment extension needed. */
	lastblock = lblkno(fs, ip->i_ffs1_size);
	if (lastblock < ULFS_NDADDR && lastblock < lbn) {
		osize = blksize(fs, ip, lastblock);
		if (osize < fs->lfs_bsize && osize > 0) {
			if ((error = lfs_fragextend(vp, osize, fs->lfs_bsize,
			    lastblock,
			    (bpp ? &bp : NULL))))
				return (error);
			ip->i_ffs1_size = (lastblock + 1) * fs->lfs_bsize;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp)
				(void) VOP_BWRITE(bp);
		}
	}

	/*
	 * If the block we are writing is a direct block, it's the last
	 * block in the file, and offset + iosize is less than a full
	 * block, we can write one or more fragments.  There are two cases:
	 * the block is brand new and we should allocate it the correct
	 * size, or it already exists and contains some fragments and
	 * may need to be extended.
	 */
	if (lbn < ULFS_NDADDR && lblkno(fs, ip->i_ffs1_size) <= lbn) {
		osize = blksize(fs, ip, lbn);
		nsize = fragroundup(fs, offset + iosize);
		if (lblktosize(fs, lbn) >= ip->i_ffs1_size) {
			/* Brand new block or fragment */
			frags = numfrags(fs, nsize);
			if (bpp) {
				*bpp = bp = getblk(vp, lbn, nsize);
				bp->b_blkno = UNWRITTEN;
			}
			ip->i_lfs_effnblks += frags;
			fs->lfs_bfree -= frags;
			ip->i_ffs1_db[lbn] = UNWRITTEN;
		} else {
			if (nsize <= osize) {
				/* No need to extend */
				if (bpp && (error = bread(vp, lbn, osize,
				    NOCRED, 0, &bp)))
					return error;
			} else {
				/* Extend existing block */
				if ((error =
				    lfs_fragextend(vp, osize, nsize, lbn,
				    (bpp ? &bp : NULL))))
					return error;
			}
			if (bpp)
				*bpp = bp;
		}
		return 0;
	}

	error = ulfs_bmaparray(fs, vp, lbn, &daddr, &indirs[0], &num);
	if (error)
		return (error);

	daddr = (daddr_t)((int32_t)daddr);	/* XXX ondisk32 */

	/*
	 * Do byte accounting all at once, so we can gracefully fail *before*
	 * we start assigning blocks.
	 */
	frags = fsbtodb(fs, 1);	/* frags = VFSTOULFS(vp->v_mount)->um_seqinc; */
	bcount = 0;
	if (daddr == UNASSIGNED) {
		bcount = frags;
	}
	for (i = 1; i < num; ++i) {
		if (!indirs[i].in_exists) {
			bcount += frags;
		}
	}
	fs->lfs_bfree -= bcount;
	ip->i_lfs_effnblks += bcount;

	if (daddr == UNASSIGNED) {
		if (num > 0 && ip->i_ffs1_ib[indirs[0].in_off] == 0) {
			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
		}

		/*
		 * Create new indirect blocks if necessary
		 */
		if (num > 1) {
			idaddr = ip->i_ffs1_ib[indirs[0].in_off];
			for (i = 1; i < num; ++i) {
				ibp = getblk(vp, indirs[i].in_lbn,
				    fs->lfs_bsize);
				if (!indirs[i].in_exists) {
					memset(ibp->b_data, 0, ibp->b_bufsize);
					ibp->b_blkno = UNWRITTEN;
				} else if (!(ibp->b_flags & (B_DELWRI | B_DONE))) {
					ibp->b_blkno = fsbtodb(fs, idaddr);
					ibp->b_flags |= B_READ;
					VOP_STRATEGY(ibp);
				}
				/*
				 * This block exists, but the next one may not.
				 * If that is the case mark it UNWRITTEN to
				 * keep the accounting straight.
				 */
				/* XXX ondisk32 */
				if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
					((int32_t *)ibp->b_data)[indirs[i].in_off] =
						UNWRITTEN;
				/* XXX ondisk32 */
				idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
				if ((error = VOP_BWRITE(ibp)))
					return error;
			}
		}
	}

	/*
	 * Get the existing block from the cache, if requested.
	 */
	if (bpp)
		*bpp = bp = getblk(vp, lbn, blksize(fs, ip, lbn));

	/*
	 * The block we are writing may be a brand new block
	 * in which case we need to do accounting.
	 *
	 * We can tell a truly new block because ulfs_bmaparray will say
	 * it is UNASSIGNED. Once we allocate it we will assign it the
	 * disk address UNWRITTEN.
	 */
	if (daddr == UNASSIGNED) {
		if (bpp) {
			/* Note the new address */
			bp->b_blkno = UNWRITTEN;
		}

		switch (num) {
		case 0:
			ip->i_ffs1_db[lbn] = UNWRITTEN;
			break;
		case 1:
			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
			break;
		default:
			idp = &indirs[num - 1];
			if (bread(vp, idp->in_lbn, fs->lfs_bsize, NOCRED,
			    0, &ibp))
				panic("lfs_balloc: bread bno %lld",
				    (long long)idp->in_lbn);
			/* XXX ondisk32 */
			((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
			VOP_BWRITE(ibp);
		}
	} else if (bpp && !(bp->b_flags & (B_DONE|B_DELWRI))) {
		/*
		 * Not a brand new block, also not in the cache;
		 * read it in from disk.
		 */
		if (iosize == fs->lfs_bsize)
			/* Optimization: I/O is unnecessary. */
			bp->b_blkno = daddr;
		else {
			/*
			 * We need to read the block to preserve the
			 * existing bytes.
			 */
			bp->b_blkno = daddr;
			bp->b_flags |= B_READ;
			VOP_STRATEGY(bp);
			return 0;
		}
	}

	return (0);
}

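/*
 * Extend a fragment from osize to nsize bytes, adjusting the block
 * accounting and, if a buffer is requested, growing its data area.
 */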
int
lfs_fragextend(struct uvnode *vp, int osize, int nsize, daddr_t lbn,
    struct ubuf **bpp)
{
	struct inode *ip;
	struct lfs *fs;
	int frags;
	int error;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	frags = (long)numfrags(fs, nsize - osize);
	error = 0;

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it. UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, NOCRED, 0, bpp))) {
		brelse(*bpp, 0);
		goto out;
	}

	fs->lfs_bfree -= frags;
	ip->i_lfs_effnblks += frags;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;

	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		(*bpp)->b_data = erealloc((*bpp)->b_data, nsize);
		(void)memset((*bpp)->b_data + osize, 0, nsize - osize);
	}

out:
	return (error);
}