1 /* $NetBSD: resize_ffs.c,v 1.6 2004/01/05 23:23:33 jmmv Exp $ */
2 /* From sources sent on February 17, 2003 */
3 /*-
4 * As its sole author, I explicitly place this code in the public
5 * domain. Anyone may use it for any purpose (though I would
6 * appreciate credit where it is due).
7 *
8 * der Mouse
9 *
10 * mouse (at) rodents.montreal.qc.ca
11 * 7D C8 61 52 5D E7 2D 39 4E F1 31 3E E8 B3 27 4B
12 */
13 /*
14 * resize_ffs:
15 *
16 * Resize a filesystem. Is capable of both growing and shrinking.
17 *
18 * Usage: resize_ffs filesystem newsize
19 *
20 * Example: resize_ffs /dev/rsd1e 29574
21 *
22 * newsize is in DEV_BSIZE units (ie, disk sectors, usually 512 bytes
23 * each).
24 *
25 * Note: this currently requires gcc to build, since it is written
26 * depending on gcc-specific features, notably nested function
27 * definitions (which in at least a few cases depend on the lexical
28 * scoping gcc provides, so they can't be trivially moved outside).
29 *
30 * It will not do anything useful with filesystems in other than
31 * host-native byte order. This really should be fixed (it's largely
32 * a historical accident; the original version of this program is
33 * older than bi-endian support in FFS).
34 *
35 * Many thanks go to John Kohl <jtk (at) NetBSD.org> for finding bugs: the
36 * one responsible for the "realloccgblk: can't find blk in cyl"
37 * problem and a more minor one which left fs_dsize wrong when
38 * shrinking. (These actually indicate bugs in fsck too - it should
39 * have caught and fixed them.)
40 *
41 */
42
43 #include <stdio.h>
44 #include <errno.h>
45 #include <fcntl.h>
46 #include <stdlib.h>
47 #include <unistd.h>
48 #include <strings.h>
49 #include <err.h>
50 #include <sys/stat.h>
51 #include <sys/mman.h>
52 #include <sys/param.h> /* MAXFRAG */
53 #include <ufs/ffs/fs.h>
54 #include <ufs/ufs/dir.h>
55 #include <ufs/ufs/dinode.h>
56 #include <ufs/ufs/ufs_bswap.h> /* ufs_rw32 */
57
58 /* Suppress warnings about unused arguments */
59 #if defined(__GNUC__) && \
60 ( (__GNUC__ > 2) || \
61 ( (__GNUC__ == 2) && \
62 defined(__GNUC_MINOR__) && \
63 (__GNUC_MINOR__ >= 7) ) )
64 #define UNUSED_ARG(x) x __attribute__((__unused__))
65 #define INLINE inline
66 #else
67 #define UNUSED_ARG(x) x
68 #define INLINE /**/
69 #endif
70
71 /* new size of filesystem, in sectors */
72 static int newsize;
73
74 /* fd open onto disk device */
75 static int fd;
76
77 /* must we break up big I/O operations - see checksmallio() */
78 static int smallio;
79
80 /* size of a cg, in bytes, rounded up to a frag boundary */
81 static int cgblksz;
82
83 /* possible superblock locations */
84 static int search[] = SBLOCKSEARCH;
85 /* location of the superblock */
86 static off_t where;
87
88 /* Superblocks. */
89 static struct fs *oldsb; /* before we started */
90 static struct fs *newsb; /* copy to work with */
91 /* Buffer to hold the above. Make sure it's aligned correctly. */
92 static char sbbuf[2 * SBLOCKSIZE] __attribute__((__aligned__(__alignof__(struct fs))));
93
94 /* a cg's worth of brand new squeaky-clean inodes */
95 static struct ufs1_dinode *zinodes;
96
97 /* pointers to the in-core cgs, read off disk and possibly modified */
98 static struct cg **cgs;
99
100 /* pointer to csum array - the stuff pointed to on-disk by fs_csaddr */
101 static struct csum *csums;
102
103 /* per-cg flags, indexed by cg number */
104 static unsigned char *cgflags;
105 #define CGF_DIRTY 0x01 /* needs to be written to disk */
106 #define CGF_BLKMAPS 0x02 /* block bitmaps need rebuilding */
107 #define CGF_INOMAPS 0x04 /* inode bitmaps need rebuilding */
108
109 /* when shrinking, these two arrays record how we want blocks to move. */
110 /* if blkmove[i] is j, the frag that started out as frag #i should end */
111 /* up as frag #j. inomove[i]=j means, similarly, that the inode that */
112 /* started out as inode i should end up as inode j. */
113 static unsigned int *blkmove;
114 static unsigned int *inomove;
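/*
 * Illustrative example (hypothetical numbers): if shrinking decides that
 * the 8-frag block starting at frag 1000 must move down to frag 200,
 * mark_move() records blkmove[1000..1007] = 200..207; frags that stay
 * put keep blkmove[i] == i.  Likewise, if inode 5000 sits in a cg being
 * removed and inode 123 is free, evict_inodes() sets inomove[5000] = 123.
 */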
115
116 /* in-core copies of all inodes in the fs, indexed by inumber */
117 static struct ufs1_dinode *inodes;
118
119 /* per-inode flags, indexed by inumber */
120 static unsigned char *iflags;
121 #define IF_DIRTY 0x01 /* needs to be written to disk */
122 #define IF_BDIRTY 0x02 /* like DIRTY, but is set on first inode in a
123 * block of inodes, and applies to the whole
124 * block. */
125
126 /* Old FFS1 macros */
127 #define cg_blktot(cgp, ns) \
128 (cg_chkmagic(cgp, ns) ? \
129 ((int32_t *)((u_int8_t *)(cgp) + ufs_rw32((cgp)->cg_old_btotoff, (ns)))) \
130 : (((struct ocg *)(cgp))->cg_btot))
131 #define cg_blks(fs, cgp, cylno, ns) \
132 (cg_chkmagic(cgp, ns) ? \
133 ((int16_t *)((u_int8_t *)(cgp) + ufs_rw32((cgp)->cg_old_boff, (ns))) + \
134 (cylno) * (fs)->fs_old_nrpos) \
135 : (((struct ocg *)(cgp))->cg_b[cylno]))
136 #define cbtocylno(fs, bno) \
137 (fsbtodb(fs, bno) / (fs)->fs_old_spc)
138 #define cbtorpos(fs, bno) \
139 ((fs)->fs_old_nrpos <= 1 ? 0 : \
140 (fsbtodb(fs, bno) % (fs)->fs_old_spc / \
141 (fs)->fs_old_nsect * (fs)->fs_old_trackskew + \
142 fsbtodb(fs, bno) % (fs)->fs_old_spc % \
143 (fs)->fs_old_nsect * (fs)->fs_old_interleave) %\
144 (fs)->fs_old_nsect * (fs)->fs_old_nrpos / (fs)->fs_old_npsect)
145 #define dblksize(fs, dip, lbn) \
146 (((lbn) >= NDADDR || (dip)->di_size >= lblktosize(fs, (lbn) + 1)) \
147 ? (fs)->fs_bsize \
148 : (fragroundup(fs, blkoff(fs, (dip)->di_size))))
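/*
 * Worked example for the old-geometry macros above (illustrative values
 * only): with two disk sectors per frag and fs_old_spc == 2048 sectors
 * per cylinder, frag 10240 is sector 20480, so cbtocylno(fs, 10240) == 10.
 * dblksize() returns fs_bsize except for the final, partial block of a
 * file that still lies in the direct blocks, for which it returns the
 * file's tail rounded up to a frag boundary.
 */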
149
150
151 /*
152 * Number of disk sectors per block/fragment; assumes DEV_BSIZE byte
153 * sector size.
154 */
155 #define NSPB(fs) ((fs)->fs_old_nspf << (fs)->fs_fragshift)
156 #define NSPF(fs) ((fs)->fs_old_nspf)
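/*
 * For example, assuming the common 8192-byte block / 1024-byte frag
 * geometry with 512-byte sectors: fs_old_nspf == 2 and fs_fragshift == 3,
 * so NSPF(fs) == 2 and NSPB(fs) == 2 << 3 == 16 sectors per block.
 */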
157
158 /*
159 * See if we need to break up large I/O operations. This should never
160 * be needed, but under at least one <version,platform> combination,
161 * large enough disk transfers to the raw device hang. So if we're
162 * talking to a character special device, play it safe; in this case,
163 * readat() and writeat() break everything up into pieces no larger
164 * than 8K, doing multiple syscalls for larger operations.
165 */
166 static void
167 checksmallio(void)
168 {
169 struct stat stb;
170
171 fstat(fd, &stb);
172 smallio = ((stb.st_mode & S_IFMT) == S_IFCHR);
173 }
174 /*
175 * Read size bytes starting at blkno into buf. blkno is in DEV_BSIZE
176 * units, ie, after fsbtodb(); size is in bytes.
177 */
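/*
 * For example, loadinodes() below reads one cg's inodes with
 * readat(fsbtodb(oldsb, cgimin(oldsb, cg)), iptr,
 *        oldsb->fs_ipg * sizeof(struct ufs1_dinode));
 */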
178 static void
179 readat(off_t blkno, void *buf, int size)
180 {
181 /* Seek to the correct place. */
182 if (lseek(fd, blkno * DEV_BSIZE, L_SET) < 0)
183 err(1, "lseek failed");
184
185 /* See if we have to break up the transfer... */
186 if (smallio) {
187 char *bp; /* pointer into buf */
188 int left; /* bytes left to go */
189 int n; /* number to do this time around */
190 int rv; /* syscall return value */
191 bp = buf;
192 left = size;
193 while (left > 0) {
194 n = (left > 8192) ? 8192 : left;
195 rv = read(fd, bp, n);
196 if (rv < 0)
197 err(1, "read failed");
198 if (rv != n)
199 errx(1, "read: wanted %d, got %d", n, rv);
200 bp += n;
201 left -= n;
202 }
203 } else {
204 int rv;
205 rv = read(fd, buf, size);
206 if (rv < 0)
207 err(1, "read failed");
208 if (rv != size)
209 errx(1, "read: wanted %d, got %d", size, rv);
210 }
211 }
212 /*
213 * Write size bytes from buf starting at blkno. blkno is in DEV_BSIZE
214 * units, ie, after fsbtodb(); size is in bytes.
215 */
216 static void
217 writeat(off_t blkno, const void *buf, int size)
218 {
219 /* Seek to the correct place. */
220 if (lseek(fd, blkno * DEV_BSIZE, L_SET) < 0)
221 err(1, "lseek failed");
222 /* See if we have to break up the transfer... */
223 if (smallio) {
224 const char *bp; /* pointer into buf */
225 int left; /* bytes left to go */
226 int n; /* number to do this time around */
227 int rv; /* syscall return value */
228 bp = buf;
229 left = size;
230 while (left > 0) {
231 n = (left > 8192) ? 8192 : left;
232 rv = write(fd, bp, n);
233 if (rv < 0)
234 err(1, "write failed");
235 if (rv != n)
236 errx(1, "write: wanted %d, got %d", n, rv);
237 bp += n;
238 left -= n;
239 }
240 } else {
241 int rv;
242 rv = write(fd, buf, size);
243 if (rv < 0)
244 err(1, "write failed");
245 if (rv != size)
246 errx(1, "write: wanted %d, got %d", size, rv);
247 }
248 }
249 /*
250 * Never-fail versions of malloc() and realloc(), and an allocation
251 * routine (which also never fails) for allocating memory that will
252 * never be freed until exit.
253 */
254
255 /*
256 * Never-fail malloc.
257 */
258 static void *
259 nfmalloc(size_t nb, const char *tag)
260 {
261 void *rv;
262
263 rv = malloc(nb);
264 if (rv)
265 return (rv);
266 err(1, "Can't allocate %lu bytes for %s",
267 (unsigned long int) nb, tag);
268 }
269 /*
270 * Never-fail realloc.
271 */
272 static void *
273 nfrealloc(void *blk, size_t nb, const char *tag)
274 {
275 void *rv;
276
277 rv = realloc(blk, nb);
278 if (rv)
279 return (rv);
280 err(1, "Can't re-allocate %lu bytes for %s",
281 (unsigned long int) nb, tag);
282 }
283 /*
284 * Allocate memory that will never be freed or reallocated. Arguably
285 * this routine should handle small allocations by chopping up pages,
286 * but that's not worth the bother; it's not called more than a
287 * handful of times per run, and if the allocations are that small the
288 * waste in giving each one its own page is ignorable.
289 */
290 static void *
291 alloconce(size_t nb, const char *tag)
292 {
293 void *rv;
294
295 rv = mmap(0, nb, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
296 if (rv != MAP_FAILED)
297 return (rv);
298 err(1, "Can't map %lu bytes for %s",
299 (unsigned long int) nb, tag);
300 }
301 /*
302 * Load the cgs and csums off disk. Also allocates the space to load
303 * them into and initializes the per-cg flags.
304 */
305 static void
306 loadcgs(void)
307 {
308 int cg;
309 char *cgp;
310
311 cgblksz = roundup(oldsb->fs_cgsize, oldsb->fs_fsize);
312 cgs = nfmalloc(oldsb->fs_ncg * sizeof(struct cg *), "cg pointers");
313 cgp = alloconce(oldsb->fs_ncg * cgblksz, "cgs");
314 cgflags = nfmalloc(oldsb->fs_ncg, "cg flags");
315 csums = nfmalloc(oldsb->fs_cssize, "cg summary");
316 for (cg = 0; cg < oldsb->fs_ncg; cg++) {
317 cgs[cg] = (struct cg *) cgp;
318 readat(fsbtodb(oldsb, cgtod(oldsb, cg)), cgp, cgblksz);
319 cgflags[cg] = 0;
320 cgp += cgblksz;
321 }
322 readat(fsbtodb(oldsb, oldsb->fs_csaddr), csums, oldsb->fs_cssize);
323 }
324 /*
325 * Set n bits, starting with bit #base, in the bitmap pointed to by
326 * bitvec (which is assumed to be large enough to include bits base
327 * through base+n-1).
328 */
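/*
 * Worked example: set_bits(vec, 5, 14) marks bits 5..18 - the partial
 * leading byte gets vec[0] |= 0xe0 (bits 5-7), the single full byte in
 * the middle is set via memset (vec[1] = 0xff, bits 8-15), and the
 * trailing partial byte gets vec[2] |= 0x07 (bits 16-18).
 */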
329 static void
330 set_bits(unsigned char *bitvec, unsigned int base, unsigned int n)
331 {
332 if (n < 1)
333 return; /* nothing to do */
334 if (base & 7) { /* partial byte at beginning */
335 if (n <= 8 - (base & 7)) { /* entirely within one byte */
336 bitvec[base >> 3] |= (~((~0U) << n)) << (base & 7);
337 return;
338 }
339 bitvec[base >> 3] |= (~0U) << (base & 7);
340 n -= 8 - (base & 7);
341 base = (base & ~7) + 8;
342 }
343 if (n >= 8) { /* do full bytes */
344 memset(bitvec + (base >> 3), 0xff, n >> 3);
345 base += n & ~7;
346 n &= 7;
347 }
348 if (n) { /* partial byte at end */
349 bitvec[base >> 3] |= ~((~0U) << n);
350 }
351 }
352 /*
353 * Clear n bits, starting with bit #base, in the bitmap pointed to by
354 * bitvec (which is assumed to be large enough to include bits base
355 * through base+n-1). Code parallels set_bits().
356 */
357 static void
358 clr_bits(unsigned char *bitvec, int base, int n)
359 {
360 if (n < 1)
361 return;
362 if (base & 7) {
363 if (n <= 8 - (base & 7)) {
364 bitvec[base >> 3] &= ~((~((~0U) << n)) << (base & 7));
365 return;
366 }
367 bitvec[base >> 3] &= ~((~0U) << (base & 7));
368 n -= 8 - (base & 7);
369 base = (base & ~7) + 8;
370 }
371 if (n >= 8) {
372 bzero(bitvec + (base >> 3), n >> 3);
373 base += n & ~7;
374 n &= 7;
375 }
376 if (n) {
377 bitvec[base >> 3] &= (~0U) << n;
378 }
379 }
380 /*
381 * Test whether bit #bit is set in the bitmap pointed to by bitvec.
382 */
383 INLINE static int
384 bit_is_set(unsigned char *bitvec, int bit)
385 {
386 return (bitvec[bit >> 3] & (1 << (bit & 7)));
387 }
388 /*
389 * Test whether bit #bit is clear in the bitmap pointed to by bitvec.
390 */
391 INLINE static int
392 bit_is_clr(unsigned char *bitvec, int bit)
393 {
394 return (!bit_is_set(bitvec, bit));
395 }
396 /*
397 * Test whether a whole block of bits is set in a bitmap. This is
398 * designed for testing (aligned) disk blocks in a bit-per-frag
399 * bitmap; it has assumptions wired into it based on that, essentially
400 * that the entire block fits into a single byte. This returns true
401 * iff _all_ the bits are set; it is not just the complement of
402 * blk_is_clr on the same arguments (unless blkfrags==1).
403 */
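/*
 * For example, blk_is_set(vec, 16, 8) computes mask == 0xff and tests
 * whether vec[2] == 0xff; blk_is_set(vec, 20, 4) computes mask == 0xf0
 * and examines only the high nibble of vec[2].
 */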
404 INLINE static int
405 blk_is_set(unsigned char *bitvec, int blkbase, int blkfrags)
406 {
407 unsigned int mask;
408
409 mask = (~((~0U) << blkfrags)) << (blkbase & 7);
410 return ((bitvec[blkbase >> 3] & mask) == mask);
411 }
412 /*
413 * Test whether a whole block of bits is clear in a bitmap. See
414 * blk_is_set (above) for assumptions. This returns true iff _all_
415 * the bits are clear; it is not just the complement of blk_is_set on
416 * the same arguments (unless blkfrags==1).
417 */
418 INLINE static int
419 blk_is_clr(unsigned char *bitvec, int blkbase, int blkfrags)
420 {
421 unsigned int mask;
422
423 mask = (~((~0U) << blkfrags)) << (blkbase & 7);
424 return ((bitvec[blkbase >> 3] & mask) == 0);
425 }
426 /*
427 * Initialize a new cg. Called when growing. Assumes memory has been
428 * allocated but not otherwise set up. This code sets the fields of
429 * the cg, initializes the bitmaps (and cluster summaries, if
430 * applicable), updates both per-cylinder summary info and the global
431 * summary info in newsb; it also writes out new inodes for the cg.
432 *
433 * This code knows it can never be called for cg 0, which makes it a
434 * bit simpler than it would otherwise be.
435 */
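/*
 * The layout constructed below is, in order: the fixed struct cg header,
 * the old-style per-cylinder block totals (cg_old_btotoff), the
 * per-cylinder rotational-position counts (cg_old_boff), the inode-used
 * bitmap (cg_iusedoff), the frag-free bitmap (cg_freeoff), and, when
 * fs_contigsumsize > 0, the cluster summary array and cluster bitmap;
 * cg_nextfreeoff marks the end of the used portion.
 */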
436 static void
437 initcg(int cgn)
438 {
439 struct cg *cg; /* The in-core cg, of course */
440 int base; /* Disk address of cg base */
441 int dlow; /* Size of pre-cg data area */
442 int dhigh; /* Offset of post-inode data area, from base */
443 int dmax; /* Offset of end of post-inode data area */
444 int i; /* Generic loop index */
445 int n; /* Generic count */
446
447 cg = cgs[cgn];
448 /* Place the data areas */
449 base = cgbase(newsb, cgn);
450 dlow = cgsblock(newsb, cgn) - base;
451 dhigh = cgdmin(newsb, cgn) - base;
452 dmax = newsb->fs_size - base;
453 if (dmax > newsb->fs_fpg)
454 dmax = newsb->fs_fpg;
455 /*
456 * Clear out the cg - assumes all-0-bytes is the correct way
457 * to initialize fields we don't otherwise touch, which is
458 * perhaps not the right thing to do, but it's what fsck and
459 * mkfs do.
460 */
461 bzero(cg, newsb->fs_cgsize);
462 cg->cg_time = newsb->fs_time;
463 cg->cg_magic = CG_MAGIC;
464 cg->cg_cgx = cgn;
465 cg->cg_old_ncyl = newsb->fs_old_cpg;
466 /* fsck whines if the cg->cg_old_ncyl value in the last cg is fs_old_cpg
467 * instead of zero, when fs_old_cpg is the correct value. */
468 /* XXX fix once fsck is fixed */
469 if ((cgn == newsb->fs_ncg - 1) /* && (newsb->fs_old_ncyl % newsb->fs_old_cpg) */ ) {
470 cg->cg_old_ncyl = newsb->fs_old_ncyl % newsb->fs_old_cpg;
471 }
472 cg->cg_niblk = newsb->fs_ipg;
473 cg->cg_ndblk = dmax;
474 /* Set up the bitmap pointers. We have to be careful to lay out the
475 * cg _exactly_ the way mkfs and fsck do it, since fsck compares the
476 * _entire_ cg against a recomputed cg, and whines if there is any
477 * mismatch, including the bitmap offsets. */
478 /* XXX update this comment when fsck is fixed */
479 cg->cg_old_btotoff = &cg->cg_space[0] - (unsigned char *) cg;
480 cg->cg_old_boff = cg->cg_old_btotoff
481 + (newsb->fs_old_cpg * sizeof(int32_t));
482 cg->cg_iusedoff = cg->cg_old_boff +
483 (newsb->fs_old_cpg * newsb->fs_old_nrpos * sizeof(int16_t));
484 cg->cg_freeoff = cg->cg_iusedoff + howmany(newsb->fs_ipg, NBBY);
485 if (newsb->fs_contigsumsize > 0) {
486 cg->cg_nclusterblks = cg->cg_ndblk / newsb->fs_frag;
487 cg->cg_clustersumoff = cg->cg_freeoff +
488 howmany(newsb->fs_old_cpg * newsb->fs_old_spc / NSPF(newsb),
489 NBBY) - sizeof(int32_t);
490 cg->cg_clustersumoff =
491 roundup(cg->cg_clustersumoff, sizeof(int32_t));
492 cg->cg_clusteroff = cg->cg_clustersumoff +
493 ((newsb->fs_contigsumsize + 1) * sizeof(int32_t));
494 cg->cg_nextfreeoff = cg->cg_clusteroff +
495 howmany(newsb->fs_old_cpg * newsb->fs_old_spc / NSPB(newsb),
496 NBBY);
497 n = dlow / newsb->fs_frag;
498 if (n > 0) {
499 set_bits(cg_clustersfree(cg, 0), 0, n);
500 cg_clustersum(cg, 0)[(n > newsb->fs_contigsumsize) ?
501 newsb->fs_contigsumsize : n]++;
502 }
503 } else {
504 cg->cg_nextfreeoff = cg->cg_freeoff +
505 howmany(newsb->fs_old_cpg * newsb->fs_old_spc / NSPF(newsb),
506 NBBY);
507 }
508 /* Mark the data areas as free; everything else is marked busy by the
509 * bzero up at the top. */
510 set_bits(cg_blksfree(cg, 0), 0, dlow);
511 set_bits(cg_blksfree(cg, 0), dhigh, dmax - dhigh);
512 /* Initialize summary info */
513 cg->cg_cs.cs_ndir = 0;
514 cg->cg_cs.cs_nifree = newsb->fs_ipg;
515 cg->cg_cs.cs_nbfree = dlow / newsb->fs_frag;
516 cg->cg_cs.cs_nffree = 0;
517
518 /* This is the simplest way of doing this; we perhaps could compute
519 * the correct cg_blktot()[] and cg_blks()[] values other ways, but it
520 * would be complicated and hardly seems worth the effort. (The
521 * reason there isn't frag-at-beginning and frag-at-end code here,
522 * like the code below for the post-inode data area, is that the
523 * pre-sb data area always starts at 0, and thus is block-aligned, and
524 * always ends at the sb, which is block-aligned.) */
525 for (i = 0; i < dlow; i += newsb->fs_frag) {
526 cg_blktot(cg, 0)[cbtocylno(newsb, i)]++;
527 cg_blks(newsb, cg, cbtocylno(newsb, i), 0)[cbtorpos(newsb, i)]++;
528 }
529 /* Deal with a partial block at the beginning of the post-inode area.
530 * I'm not convinced this can happen - I think the inodes are always
531 * block-aligned and always an integral number of blocks - but it's
532 * cheap to do the right thing just in case. */
533 if (dhigh % newsb->fs_frag) {
534 n = newsb->fs_frag - (dhigh % newsb->fs_frag);
535 cg->cg_frsum[n]++;
536 cg->cg_cs.cs_nffree += n;
537 dhigh += n;
538 }
539 n = (dmax - dhigh) / newsb->fs_frag;
540 /* We have n full-size blocks in the post-inode data area. */
541 if (n > 0) {
542 cg->cg_cs.cs_nbfree += n;
543 if (newsb->fs_contigsumsize > 0) {
544 i = dhigh / newsb->fs_frag;
545 set_bits(cg_clustersfree(cg, 0), i, n);
546 cg_clustersum(cg, 0)[(n > newsb->fs_contigsumsize) ?
547 newsb->fs_contigsumsize : n]++;
548 }
549 for (i = n; i > 0; i--) {
550 cg_blktot(cg, 0)[cbtocylno(newsb, dhigh)]++;
551 cg_blks(newsb, cg,
552 cbtocylno(newsb, dhigh), 0)[cbtorpos(newsb,
553 dhigh)]++;
554 dhigh += newsb->fs_frag;
555 }
556 }
557 /* Deal with any leftover frag at the end of the cg. */
558 i = dmax - dhigh;
559 if (i) {
560 cg->cg_frsum[i]++;
561 cg->cg_cs.cs_nffree += i;
562 }
563 /* Update the csum info. */
564 csums[cgn] = cg->cg_cs;
565 newsb->fs_cstotal.cs_nffree += cg->cg_cs.cs_nffree;
566 newsb->fs_cstotal.cs_nbfree += cg->cg_cs.cs_nbfree;
567 newsb->fs_cstotal.cs_nifree += cg->cg_cs.cs_nifree;
568 /* Write out the cleared inodes. */
569 writeat(fsbtodb(newsb, cgimin(newsb, cgn)), zinodes,
570 newsb->fs_ipg * sizeof(struct ufs1_dinode));
571 /* Dirty the cg. */
572 cgflags[cgn] |= CGF_DIRTY;
573 }
574 /*
575 * Find free space, at least nfrags consecutive frags of it. Pays no
576 * attention to block boundaries, but refuses to straddle cg
577 * boundaries, even if the disk blocks involved are in fact
578 * consecutive. Return value is the frag number of the first frag of
579  * the run, or -1 if no space was found.  Uses newsb for sb values,
580 * and assumes the cgs[] structures correctly describe the area to be
581 * searched.
582 *
583 * XXX is there a bug lurking in the ignoring of block boundaries by
584 * the routine used by fragmove() in evict_data()? Can an end-of-file
585 * frag legally straddle a block boundary? If not, this should be
586 * cloned and fixed to stop at block boundaries for that use. The
587 * current one may still be needed for csum info motion, in case that
588 * takes up more than a whole block (is the csum info allowed to begin
589 * partway through a block and continue into the following block?).
590 *
591 * If we wrap off the end of the filesystem back to the beginning, we
592 * can end up searching the end of the filesystem twice. I ignore
593 * this inefficiency, since if that happens we're going to croak with
594 * a no-space error anyway, so it happens at most once.
595 */
596 static int
597 find_freespace(unsigned int nfrags)
598 {
599 static int hand = 0; /* hand rotates through all frags in the fs */
600 int cgsize; /* size of the cg hand currently points into */
601 int cgn; /* number of cg hand currently points into */
602 int fwc; /* frag-within-cg number of frag hand points
603 * to */
604 int run; /* length of run of free frags seen so far */
605 int secondpass; /* have we wrapped from end of fs to
606 * beginning? */
607 unsigned char *bits; /* cg_blksfree()[] for cg hand points into */
608
609 cgn = dtog(newsb, hand);
610 fwc = dtogd(newsb, hand);
611 secondpass = (hand == 0);
612 run = 0;
613 bits = cg_blksfree(cgs[cgn], 0);
614 cgsize = cgs[cgn]->cg_ndblk;
615 while (1) {
616 if (bit_is_set(bits, fwc)) {
617 run++;
618 if (run >= nfrags)
619 return (hand + 1 - run);
620 } else {
621 run = 0;
622 }
623 hand++;
624 fwc++;
625 if (fwc >= cgsize) {
626 fwc = 0;
627 cgn++;
628 if (cgn >= newsb->fs_ncg) {
629 hand = 0;
630 if (secondpass)
631 return (-1);
632 secondpass = 1;
633 cgn = 0;
634 }
635 bits = cg_blksfree(cgs[cgn], 0);
636 cgsize = cgs[cgn]->cg_ndblk;
637 run = 0;
638 }
639 }
640 }
641 /*
642 * Find a free block of disk space. Finds an entire block of frags,
643 * all of which are free. Return value is the frag number of the
644 * first frag of the block, or -1 if no space was found. Uses newsb
645 * for sb values, and assumes the cgs[] structures correctly describe
646 * the area to be searched.
647 *
648 * See find_freespace(), above, for remarks about hand wrapping around.
649 */
650 static int
651 find_freeblock(void)
652 {
653 static int hand = 0; /* hand rotates through all frags in fs */
654 int cgn; /* cg number of cg hand points into */
655 int fwc; /* frag-within-cg number of frag hand points
656 * to */
657 int cgsize; /* size of cg hand points into */
658 int secondpass; /* have we wrapped from end to beginning? */
659 unsigned char *bits; /* cg_blksfree()[] for cg hand points into */
660
661 cgn = dtog(newsb, hand);
662 fwc = dtogd(newsb, hand);
663 secondpass = (hand == 0);
664 bits = cg_blksfree(cgs[cgn], 0);
665 cgsize = blknum(newsb, cgs[cgn]->cg_ndblk);
666 while (1) {
667 if (blk_is_set(bits, fwc, newsb->fs_frag))
668 return (hand);
669 fwc += newsb->fs_frag;
670 hand += newsb->fs_frag;
671 if (fwc >= cgsize) {
672 fwc = 0;
673 cgn++;
674 if (cgn >= newsb->fs_ncg) {
675 hand = 0;
676 if (secondpass)
677 return (-1);
678 secondpass = 1;
679 cgn = 0;
680 }
681 bits = cg_blksfree(cgs[cgn], 0);
682 cgsize = blknum(newsb, cgs[cgn]->cg_ndblk);
683 }
684 }
685 }
686 /*
687 * Find a free inode, returning its inumber or -1 if none was found.
688 * Uses newsb for sb values, and assumes the cgs[] structures
689 * correctly describe the area to be searched.
690 *
691 * See find_freespace(), above, for remarks about hand wrapping around.
692 */
693 static int
694 find_freeinode(void)
695 {
696 static int hand = 0; /* hand rotates through all inodes in fs */
697 int cgn; /* cg number of cg hand points into */
698 int iwc; /* inode-within-cg number of inode hand points
699 * to */
700 int secondpass; /* have we wrapped from end to beginning? */
701 unsigned char *bits; /* cg_inosused()[] for cg hand points into */
702
703 cgn = hand / newsb->fs_ipg;
704 iwc = hand % newsb->fs_ipg;
705 secondpass = (hand == 0);
706 bits = cg_inosused(cgs[cgn], 0);
707 while (1) {
708 if (bit_is_clr(bits, iwc))
709 return (hand);
710 hand++;
711 iwc++;
712 if (iwc >= newsb->fs_ipg) {
713 iwc = 0;
714 cgn++;
715 if (cgn >= newsb->fs_ncg) {
716 hand = 0;
717 if (secondpass)
718 return (-1);
719 secondpass = 1;
720 cgn = 0;
721 }
722 bits = cg_inosused(cgs[cgn], 0);
723 }
724 }
725 }
726 /*
727 * Mark a frag as free. Sets the frag's bit in the cg_blksfree bitmap
728 * for the appropriate cg, and marks the cg as dirty.
729 */
730 static void
731 free_frag(int fno)
732 {
733 int cgn;
734
735 cgn = dtog(newsb, fno);
736 set_bits(cg_blksfree(cgs[cgn], 0), dtogd(newsb, fno), 1);
737 cgflags[cgn] |= CGF_DIRTY | CGF_BLKMAPS;
738 }
739 /*
740 * Allocate a frag. Clears the frag's bit in the cg_blksfree bitmap
741 * for the appropriate cg, and marks the cg as dirty.
742 */
743 static void
744 alloc_frag(int fno)
745 {
746 int cgn;
747
748 cgn = dtog(newsb, fno);
749 clr_bits(cg_blksfree(cgs[cgn], 0), dtogd(newsb, fno), 1);
750 cgflags[cgn] |= CGF_DIRTY | CGF_BLKMAPS;
751 }
752 /*
753 * Fix up the csum array. If shrinking, this involves freeing zero or
754 * more frags; if growing, it involves allocating them, or if the
755 * frags being grown into aren't free, finding space elsewhere for the
756 * csum info. (If the number of occupied frags doesn't change,
757 * nothing happens here.)
758 */
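/*
 * Worked example (illustrative figures): with 1024-byte frags and
 * 16-byte struct csum entries, 64 cgs worth of csum info fit in one
 * frag.  Growing from 60 to 70 cgs therefore takes ntot from 1 to 2
 * frags (nnew == 1); if the frag just past the current csum area is
 * free it is simply allocated, otherwise the whole area is relocated
 * to a run found by find_freespace().
 */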
759 static void
760 csum_fixup(void)
761 {
762 int nold; /* # frags in old csum info */
763 int ntot; /* # frags in new csum info */
764 int nnew; /* ntot-nold */
765 int newloc; /* new location for csum info, if necessary */
766 int i; /* generic loop index */
767 int j; /* generic loop index */
768 int f; /* "from" frag number, if moving */
769 int t; /* "to" frag number, if moving */
770 int cgn; /* cg number, used when shrinking */
771
772 ntot = howmany(newsb->fs_cssize, newsb->fs_fsize);
773 nold = howmany(oldsb->fs_cssize, newsb->fs_fsize);
774 nnew = ntot - nold;
775 /* First, if there's no change in frag counts, it's easy. */
776 if (nnew == 0)
777 return;
778 /* Next, if we're shrinking, it's almost as easy. Just free up any
779 * frags in the old area we no longer need. */
780 if (nnew < 0) {
781 for ((i = newsb->fs_csaddr + ntot - 1), (j = nnew);
782 j < 0;
783 i--, j++) {
784 free_frag(i);
785 }
786 return;
787 }
788 /* We must be growing. Check to see that the new csum area fits
789 * within the filesystem. I think this can never happen, since for
790 * the csum area to grow, we must be adding at least one cg, so the
791 * old csum area can't be this close to the end of the new filesystem.
792 * But it's a cheap check. */
793 /* XXX what if csum info is at end of cg and grows into next cg, what
794 * if it spills over onto the next cg's backup superblock? Can this
795 * happen? */
796 if (newsb->fs_csaddr + ntot <= newsb->fs_size) {
797 /* Okay, it fits - now, see if the space we want is free. */
798 for ((i = newsb->fs_csaddr + nold), (j = nnew);
799 j > 0;
800 i++, j--) {
801 cgn = dtog(newsb, i);
802 if (bit_is_clr(cg_blksfree(cgs[cgn], 0),
803 dtogd(newsb, i)))
804 break;
805 }
806 if (j <= 0) {
807 /* Win win - all the frags we want are free. Allocate
808 * 'em and we're all done. */
809 for ((i = newsb->fs_csaddr + ntot - nnew), (j = nnew); j > 0; i++, j--) {
810 alloc_frag(i);
811 }
812 return;
813 }
814 }
815 /* We have to move the csum info, sigh. Look for new space, free old
816 * space, and allocate new. Update fs_csaddr. We don't copy anything
817 * on disk at this point; the csum info will be written to the
818 * then-current fs_csaddr as part of the final flush. */
819 newloc = find_freespace(ntot);
820 if (newloc < 0) {
821 printf("Sorry, no space available for new csums\n");
822 exit(1);
823 }
824 for (i = 0, f = newsb->fs_csaddr, t = newloc; i < ntot; i++, f++, t++) {
825 if (i < nold) {
826 free_frag(f);
827 }
828 alloc_frag(t);
829 }
830 newsb->fs_csaddr = newloc;
831 }
832 /*
833 * Recompute newsb->fs_dsize. Just scans all cgs, adding the number of
834 * data blocks in that cg to the total.
835 */
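/*
 * In other words, fs_dsize ends up as the sum over all cgs of
 * (dlow + dmax - dhigh), minus the boot/superblock area at the front of
 * cg 0 and minus the frags holding the csum info - i.e. every frag not
 * occupied by cg metadata, inodes, the boot area, or the csums.
 */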
836 static void
837 recompute_fs_dsize(void)
838 {
839 int i;
840
841 newsb->fs_dsize = 0;
842 for (i = 0; i < newsb->fs_ncg; i++) {
843 int dlow; /* size of before-sb data area */
844 int dhigh; /* offset of post-inode data area */
845 int dmax; /* total size of cg */
846 int base; /* base of cg, since cgsblock() etc add it in */
847 base = cgbase(newsb, i);
848 dlow = cgsblock(newsb, i) - base;
849 dhigh = cgdmin(newsb, i) - base;
850 dmax = newsb->fs_size - base;
851 if (dmax > newsb->fs_fpg)
852 dmax = newsb->fs_fpg;
853 newsb->fs_dsize += dlow + dmax - dhigh;
854 }
855 /* Space in cg 0 before cgsblock is boot area, not free space! */
856 newsb->fs_dsize -= cgsblock(newsb, 0) - cgbase(newsb, 0);
857 /* And of course the csum info takes up space. */
858 newsb->fs_dsize -= howmany(newsb->fs_cssize, newsb->fs_fsize);
859 }
860 /*
861 * Return the current time. We call this and assign, rather than
862 * calling time() directly, as insulation against OSes where fs_time
863 * is not a time_t.
864 */
865 static time_t
866 timestamp(void)
867 {
868 time_t t;
869
870 time(&t);
871 return (t);
872 }
873 /*
874 * Grow the filesystem.
875 */
876 static void
877 grow(void)
878 {
879 int i;
880
881 /* Update the timestamp. */
882 newsb->fs_time = timestamp();
883 /* Allocate and clear the new-inode area, in case we add any cgs. */
884 zinodes = alloconce(newsb->fs_ipg * sizeof(struct ufs1_dinode),
885 "zeroed inodes");
886 bzero(zinodes, newsb->fs_ipg * sizeof(struct ufs1_dinode));
887 /* Update the size. */
888 newsb->fs_size = dbtofsb(newsb, newsize);
889 /* Did we actually not grow? (This can happen if newsize is less than
890 * a frag larger than the old size - unlikely, but no excuse to
891 * misbehave if it happens.) */
892 if (newsb->fs_size == oldsb->fs_size)
893 return;
894 /* Check that the new last sector (frag, actually) is writable. Since
895 * it's at least one frag larger than it used to be, we know we aren't
896 * overwriting anything important by this. (The choice of sbbuf as
897 * what to write is irrelevant; it's just something handy that's known
898 * to be at least one frag in size.) */
899 writeat(newsb->fs_size - 1, &sbbuf, newsb->fs_fsize);
900 /* Update fs_old_ncyl and fs_ncg. */
901 newsb->fs_old_ncyl = (newsb->fs_size * NSPF(newsb)) / newsb->fs_old_spc;
902 newsb->fs_ncg = howmany(newsb->fs_old_ncyl, newsb->fs_old_cpg);
903 /* Does the last cg end before the end of its inode area? There is no
904 * reason why this couldn't be handled, but it would complicate a lot
905 * of code (in all filesystem code - fsck, kernel, etc) because of the
906 * potential partial inode area, and the gain in space would be
907 * minimal, at most the pre-sb data area. */
908 if (cgdmin(newsb, newsb->fs_ncg - 1) > newsb->fs_size) {
909 newsb->fs_ncg--;
910 newsb->fs_old_ncyl = newsb->fs_ncg * newsb->fs_old_cpg;
911 newsb->fs_size = (newsb->fs_old_ncyl * newsb->fs_old_spc) / NSPF(newsb);
912 printf("Warning: last cylinder group is too small;\n");
913 printf(" dropping it. New size = %lu.\n",
914 (unsigned long int) fsbtodb(newsb, newsb->fs_size));
915 }
916 /* Find out how big the csum area is, and realloc csums if bigger. */
917 newsb->fs_cssize = fragroundup(newsb,
918 newsb->fs_ncg * sizeof(struct csum));
919 if (newsb->fs_cssize > oldsb->fs_cssize)
920 csums = nfrealloc(csums, newsb->fs_cssize, "new cg summary");
921 /* If we're adding any cgs, realloc structures and set up the new cgs. */
922 if (newsb->fs_ncg > oldsb->fs_ncg) {
923 char *cgp;
924 cgs = nfrealloc(cgs, newsb->fs_ncg * sizeof(struct cg *),
925 "cg pointers");
926 cgflags = nfrealloc(cgflags, newsb->fs_ncg, "cg flags");
927 bzero(cgflags + oldsb->fs_ncg, newsb->fs_ncg - oldsb->fs_ncg);
928 cgp = alloconce((newsb->fs_ncg - oldsb->fs_ncg) * cgblksz,
929 "cgs");
930 for (i = oldsb->fs_ncg; i < newsb->fs_ncg; i++) {
931 cgs[i] = (struct cg *) cgp;
932 initcg(i);
933 cgp += cgblksz;
934 }
935 cgs[oldsb->fs_ncg - 1]->cg_old_ncyl = oldsb->fs_old_cpg;
936 cgflags[oldsb->fs_ncg - 1] |= CGF_DIRTY;
937 }
938 /* If the old fs ended partway through a cg, we have to update the old
939 * last cg (though possibly not to a full cg!). */
940 if (oldsb->fs_size % oldsb->fs_fpg) {
941 struct cg *cg;
942 int newcgsize;
943 int prevcgtop;
944 int oldcgsize;
945 cg = cgs[oldsb->fs_ncg - 1];
946 cgflags[oldsb->fs_ncg - 1] |= CGF_DIRTY | CGF_BLKMAPS;
947 prevcgtop = oldsb->fs_fpg * (oldsb->fs_ncg - 1);
948 newcgsize = newsb->fs_size - prevcgtop;
949 if (newcgsize > newsb->fs_fpg)
950 newcgsize = newsb->fs_fpg;
951 oldcgsize = oldsb->fs_size % oldsb->fs_fpg;
952 set_bits(cg_blksfree(cg, 0), oldcgsize, newcgsize - oldcgsize);
953 cg->cg_old_ncyl = howmany(newcgsize * NSPF(newsb), newsb->fs_old_spc);
954 cg->cg_ndblk = newcgsize;
955 }
956 /* Fix up the csum info, if necessary. */
957 csum_fixup();
958 /* Make fs_dsize match the new reality. */
959 recompute_fs_dsize();
960 }
961 /*
962 * Call (*fn)() for each inode, passing the inode and its inumber. The
963  * number of cylinder groups is passed in, so this can be used to map
964 * over either the old or the new filesystem's set of inodes.
965 */
966 static void
967 map_inodes(void (*fn) (struct ufs1_dinode * di, unsigned int, void *arg), int ncg, void *cbarg) {
968 int i;
969 int ni;
970
971 ni = oldsb->fs_ipg * ncg;
972 for (i = 0; i < ni; i++)
973 (*fn) (inodes + i, i, cbarg);
974 }
975 /* Values for the last argument to the map function for
976  * map_inode_data_blocks.  MDB_DATA indicates the block contains
977 * file data; MDB_INDIR_PRE and MDB_INDIR_POST indicate that it's an
978 * indirect block. The MDB_INDIR_PRE call is made before the indirect
979 * block pointers are followed and the pointed-to blocks scanned,
980 * MDB_INDIR_POST after.
981 */
982 #define MDB_DATA 1
983 #define MDB_INDIR_PRE 2
984 #define MDB_INDIR_POST 3
985
986 typedef void (*mark_callback_t) (unsigned int blocknum, unsigned int nfrags, unsigned int blksize, int opcode);
987
988 /* Helper function - handles a data block. Calls the callback
989 * function and returns number of bytes occupied in file (actually,
990 * rounded up to a frag boundary). The name is historical. */
991 static int
992 markblk(mark_callback_t fn, struct ufs1_dinode * di, int bn, off_t o)
993 {
994 int sz;
995 int nb;
996 if (o >= di->di_size)
997 return (0);
998 sz = dblksize(newsb, di, lblkno(newsb, o));
999 nb = (sz > di->di_size - o) ? di->di_size - o : sz;
1000 if (bn)
1001 (*fn) (bn, numfrags(newsb, sz), nb, MDB_DATA);
1002 return (sz);
1003 }
1004 /* Helper function - handles an indirect block. Makes the
1005 * MDB_INDIR_PRE callback for the indirect block, loops over the
1006 * pointers and recurses, and makes the MDB_INDIR_POST callback.
1007 * Returns the number of bytes occupied in file, as does markblk().
1008 * For the sake of update_for_data_move(), we read the indirect block
1009 * _after_ making the _PRE callback. The name is historical. */
1010 static int
1011 markiblk(mark_callback_t fn, struct ufs1_dinode * di, int bn, off_t o, int lev)
1012 {
1013 int i;
1014 int j;
1015 int tot;
1016 static int32_t indirblk1[howmany(MAXBSIZE, sizeof(int32_t))];
1017 static int32_t indirblk2[howmany(MAXBSIZE, sizeof(int32_t))];
1018 static int32_t indirblk3[howmany(MAXBSIZE, sizeof(int32_t))];
1019 static int32_t *indirblks[3] = {
1020 &indirblk1[0], &indirblk2[0], &indirblk3[0]
1021 };
1022 if (lev < 0)
1023 return (markblk(fn, di, bn, o));
1024 if (bn == 0) {
1025 for (i = newsb->fs_bsize;
1026 lev >= 0;
1027 i *= NINDIR(newsb), lev--);
1028 return (i);
1029 }
1030 (*fn) (bn, newsb->fs_frag, newsb->fs_bsize, MDB_INDIR_PRE);
1031 readat(fsbtodb(newsb, bn), indirblks[lev], newsb->fs_bsize);
1032 tot = 0;
1033 for (i = 0; i < NINDIR(newsb); i++) {
1034 j = markiblk(fn, di, indirblks[lev][i], o, lev - 1);
1035 if (j == 0)
1036 break;
1037 o += j;
1038 tot += j;
1039 }
1040 (*fn) (bn, newsb->fs_frag, newsb->fs_bsize, MDB_INDIR_POST);
1041 return (tot);
1042 }
1043
1044
1045 /*
1046 * Call (*fn)() for each data block for an inode. This routine assumes
1047 * the inode is known to be of a type that has data blocks (file,
1048 * directory, or non-fast symlink). The called function is:
1049 *
1050 * (*fn)(unsigned int blkno, unsigned int nf, unsigned int nb, int op)
1051 *
1052 * where blkno is the frag number, nf is the number of frags starting
1053 * at blkno (always <= fs_frag), nb is the number of bytes that belong
1054  * to the file (usually nf*fs_fsize, often less for the last block/frag
1055 * of a file).
1056 */
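/*
 * moveindir_callback() and update_dir_data(), later in this file, are two
 * concrete examples of such callbacks: the former rewrites indirect
 * blocks when called with MDB_INDIR_PRE, the latter rewrites directory
 * data blocks when called with MDB_DATA.
 */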
1057 static void
1058 map_inode_data_blocks(struct ufs1_dinode * di, mark_callback_t fn)
1059 {
1060 off_t o; /* offset within inode */
1061 int inc; /* increment for o - maybe should be off_t? */
1062 int b; /* index within di_db[] and di_ib[] arrays */
1063
1064 /* Scan the direct blocks... */
1065 o = 0;
1066 for (b = 0; b < NDADDR; b++) {
1067 inc = markblk(fn, di, di->di_db[b], o);
1068 if (inc == 0)
1069 break;
1070 o += inc;
1071 }
1072 /* ...and the indirect blocks. */
1073 if (inc) {
1074 for (b = 0; b < NIADDR; b++) {
1075 inc = markiblk(fn, di, di->di_ib[b], o, b);
1076 if (inc == 0)
1077 return;
1078 o += inc;
1079 }
1080 }
1081 }
1082
1083 static void
1084 dblk_callback(struct ufs1_dinode * di, unsigned int inum, void *arg)
1085 {
1086 mark_callback_t fn;
1087 fn = (mark_callback_t) arg;
1088 switch (di->di_mode & IFMT) {
1089 case IFLNK:
1090 if (di->di_size > newsb->fs_maxsymlinklen) {
1091 case IFDIR:
1092 case IFREG:
1093 map_inode_data_blocks(di, fn);
1094 }
1095 break;
1096 }
1097 }
1098 /*
1099 * Make a callback call, a la map_inode_data_blocks, for all data
1100 * blocks in the entire fs. This is used only once, in
1101 * update_for_data_move, but it's out at top level because the complex
1102 * downward-funarg nesting that would otherwise result seems to give
1103 * gcc gastric distress.
1104 */
1105 static void
1106 map_data_blocks(mark_callback_t fn, int ncg)
1107 {
1108 map_inodes(&dblk_callback, ncg, (void *) fn);
1109 }
1110 /*
1111 * Initialize the blkmove array.
1112 */
1113 static void
1114 blkmove_init(void)
1115 {
1116 int i;
1117
1118 blkmove = alloconce(oldsb->fs_size * sizeof(*blkmove), "blkmove");
1119 for (i = 0; i < oldsb->fs_size; i++)
1120 blkmove[i] = i;
1121 }
1122 /*
1123 * Load the inodes off disk. Allocates the structures and initializes
1124 * them - the inodes from disk, the flags to zero.
1125 */
1126 static void
1127 loadinodes(void)
1128 {
1129 int cg;
1130 struct ufs1_dinode *iptr;
1131
1132 inodes = alloconce(oldsb->fs_ncg * oldsb->fs_ipg * sizeof(struct ufs1_dinode), "inodes");
1133 iflags = alloconce(oldsb->fs_ncg * oldsb->fs_ipg, "inode flags");
1134 bzero(iflags, oldsb->fs_ncg * oldsb->fs_ipg);
1135 iptr = inodes;
1136 for (cg = 0; cg < oldsb->fs_ncg; cg++) {
1137 readat(fsbtodb(oldsb, cgimin(oldsb, cg)), iptr,
1138 oldsb->fs_ipg * sizeof(struct ufs1_dinode));
1139 iptr += oldsb->fs_ipg;
1140 }
1141 }
1142 /*
1143 * Report a filesystem-too-full problem.
1144 */
1145 static void
1146 toofull(void)
1147 {
1148 printf("Sorry, would run out of data blocks\n");
1149 exit(1);
1150 }
1151 /*
1152 * Record a desire to move "n" frags from "from" to "to".
1153 */
1154 static void
1155 mark_move(unsigned int from, unsigned int to, unsigned int n)
1156 {
1157 for (; n > 0; n--)
1158 blkmove[from++] = to++;
1159 }
1160 /* Helper function - evict n frags, starting with start (cg-relative).
1161 * The free bitmap is scanned, unallocated frags are ignored, and
1162 * each block of consecutive allocated frags is moved as a unit.
1163 */
1164 static void
1165 fragmove(struct cg * cg, int base, unsigned int start, unsigned int n)
1166 {
1167 int i;
1168 int run;
1169 run = 0;
1170 for (i = 0; i <= n; i++) {
1171 if ((i < n) && bit_is_clr(cg_blksfree(cg, 0), start + i)) {
1172 run++;
1173 } else {
1174 if (run > 0) {
1175 int off;
1176 off = find_freespace(run);
1177 if (off < 0)
1178 toofull();
1179 mark_move(base + start + i - run, off, run);
1180 set_bits(cg_blksfree(cg, 0), start + i - run,
1181 run);
1182 clr_bits(cg_blksfree(cgs[dtog(oldsb, off)], 0),
1183 dtogd(oldsb, off), run);
1184 }
1185 run = 0;
1186 }
1187 }
1188 }
1189 /*
1190 * Evict all data blocks from the given cg, starting at minfrag (based
1191 * at the beginning of the cg), for length nfrag. The eviction is
1192 * assumed to be entirely data-area; this should not be called with a
1193 * range overlapping the metadata structures in the cg. It also
1194 * assumes minfrag points into the given cg; it will misbehave if this
1195 * is not true.
1196 *
1197 * See the comment header on find_freespace() for one possible bug
1198 * lurking here.
1199 */
1200 static void
1201 evict_data(struct cg * cg, unsigned int minfrag, unsigned int nfrag)
1202 {
1203 int base; /* base of cg (in frags from beginning of fs) */
1204
1205
1206 base = cgbase(oldsb, cg->cg_cgx);
1207 /* Does the boundary fall in the middle of a block? To avoid breaking
1208 * between frags allocated as consecutive, we always evict the whole
1209 * block in this case, though one could argue we should check to see
1210 * if the frag before or after the break is unallocated. */
1211 if (minfrag % oldsb->fs_frag) {
1212 int n;
1213 n = minfrag % oldsb->fs_frag;
1214 minfrag -= n;
1215 nfrag += n;
1216 }
1217 /* Do whole blocks. If a block is wholly free, skip it; if wholly
1218 * allocated, move it in toto. If neither, call fragmove() to move
1219 * the frags to new locations. */
1220 while (nfrag >= oldsb->fs_frag) {
1221 if (!blk_is_set(cg_blksfree(cg, 0), minfrag, oldsb->fs_frag)) {
1222 if (blk_is_clr(cg_blksfree(cg, 0), minfrag,
1223 oldsb->fs_frag)) {
1224 int off;
1225 off = find_freeblock();
1226 if (off < 0)
1227 toofull();
1228 mark_move(base + minfrag, off, oldsb->fs_frag);
1229 set_bits(cg_blksfree(cg, 0), minfrag,
1230 oldsb->fs_frag);
1231 clr_bits(cg_blksfree(cgs[dtog(oldsb, off)], 0),
1232 dtogd(oldsb, off), oldsb->fs_frag);
1233 } else {
1234 fragmove(cg, base, minfrag, oldsb->fs_frag);
1235 }
1236 }
1237 minfrag += oldsb->fs_frag;
1238 nfrag -= oldsb->fs_frag;
1239 }
1240 /* Clean up any sub-block amount left over. */
1241 if (nfrag) {
1242 fragmove(cg, base, minfrag, nfrag);
1243 }
1244 }
1245 /*
1246 * Move all data blocks according to blkmove. We have to be careful,
1247 * because we may be updating indirect blocks that will themselves be
1248 * getting moved, or inode int32_t arrays that point to indirect
1249 * blocks that will be moved. We call this before
1250 * update_for_data_move, and update_for_data_move does inodes first,
1251 * then indirect blocks in preorder, so as to make sure that the
1252 * filesystem is self-consistent at all points, for better crash
1253 * tolerance. (We can get away with this only because all the writes
1254 * done by perform_data_move() are writing into space that's not used
1255 * by the old filesystem.) If we crash, some things may point to the
1256 * old data and some to the new, but both copies are the same. The
1257 * only wrong things should be csum info and free bitmaps, which fsck
1258 * is entirely capable of cleaning up.
1259 *
1260 * Since blkmove_init() initializes all blocks to move to their current
1261 * locations, we can have two blocks marked as wanting to move to the
1262 * same location, but only two and only when one of them is the one
1263 * that was already there. So if blkmove[i]==i, we ignore that entry
1264 * entirely - for unallocated blocks, we don't want it (and may be
1265 * putting something else there), and for allocated blocks, we don't
1266 * want to copy it anywhere.
1267 */
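/*
 * Illustrative example: with 1024-byte frags the buffer below holds 64
 * frags (maxrun), so a contiguous run of 100 frags that all move to
 * consecutive destinations is copied as one 64-frag read/write pair
 * followed by a 36-frag pair; a run is also flushed early wherever the
 * destinations stop being consecutive.
 */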
1268 static void
1269 perform_data_move(void)
1270 {
1271 int i;
1272 int run;
1273 int maxrun;
1274 char buf[65536];
1275
1276 maxrun = sizeof(buf) / newsb->fs_fsize;
1277 run = 0;
1278 for (i = 0; i < oldsb->fs_size; i++) {
1279 if ((blkmove[i] == i) ||
1280 (run >= maxrun) ||
1281 ((run > 0) &&
1282 (blkmove[i] != blkmove[i - 1] + 1))) {
1283 if (run > 0) {
1284 readat(fsbtodb(oldsb, i - run), &buf[0],
1285 run << oldsb->fs_fshift);
1286 writeat(fsbtodb(oldsb, blkmove[i - run]),
1287 &buf[0], run << oldsb->fs_fshift);
1288 }
1289 run = 0;
1290 }
1291 if (blkmove[i] != i)
1292 run++;
1293 }
1294 if (run > 0) {
1295 readat(fsbtodb(oldsb, i - run), &buf[0],
1296 run << oldsb->fs_fshift);
1297 writeat(fsbtodb(oldsb, blkmove[i - run]), &buf[0],
1298 run << oldsb->fs_fshift);
1299 }
1300 }
1301 /*
1302 * This modifies an array of int32_t, according to blkmove. This is
1303 * used to update inode block arrays and indirect blocks to point to
1304 * the new locations of data blocks.
1305 *
1306 * Return value is the number of int32_ts that needed updating; in
1307 * particular, the return value is zero iff nothing was modified.
1308 */
1309 static int
1310 movemap_blocks(int32_t * vec, int n)
1311 {
1312 int rv;
1313
1314 rv = 0;
1315 for (; n > 0; n--, vec++) {
1316 if (blkmove[*vec] != *vec) {
1317 *vec = blkmove[*vec];
1318 rv++;
1319 }
1320 }
1321 return (rv);
1322 }
1323 static void
1324 moveblocks_callback(struct ufs1_dinode * di, unsigned int inum, void *arg)
1325 {
1326 switch (di->di_mode & IFMT) {
1327 case IFLNK:
1328 if (di->di_size > oldsb->fs_maxsymlinklen) {
1329 case IFDIR:
1330 case IFREG:
1331 /* don't || these two calls; we need their
1332 * side-effects */
1333 if (movemap_blocks(&di->di_db[0], NDADDR)) {
1334 iflags[inum] |= IF_DIRTY;
1335 }
1336 if (movemap_blocks(&di->di_ib[0], NIADDR)) {
1337 iflags[inum] |= IF_DIRTY;
1338 }
1339 }
1340 break;
1341 }
1342 }
1343
1344 static void
1345 moveindir_callback(unsigned int off, unsigned int nfrag, unsigned int nbytes, int kind)
1346 {
1347 if (kind == MDB_INDIR_PRE) {
1348 int32_t blk[howmany(MAXBSIZE, sizeof(int32_t))];
1349 readat(fsbtodb(oldsb, off), &blk[0], oldsb->fs_bsize);
1350 if (movemap_blocks(&blk[0], NINDIR(oldsb))) {
1351 writeat(fsbtodb(oldsb, off), &blk[0], oldsb->fs_bsize);
1352 }
1353 }
1354 }
1355 /*
1356 * Update all inode data arrays and indirect blocks to point to the new
1357 * locations of data blocks. See the comment header on
1358 * perform_data_move for some ordering considerations.
1359 */
1360 static void
1361 update_for_data_move(void)
1362 {
1363 map_inodes(&moveblocks_callback, oldsb->fs_ncg, NULL);
1364 map_data_blocks(&moveindir_callback, oldsb->fs_ncg);
1365 }
1366 /*
1367 * Initialize the inomove array.
1368 */
1369 static void
1370 inomove_init(void)
1371 {
1372 int i;
1373
1374 inomove = alloconce(oldsb->fs_ipg * oldsb->fs_ncg * sizeof(*inomove),
1375 "inomove");
1376 for (i = (oldsb->fs_ipg * oldsb->fs_ncg) - 1; i >= 0; i--)
1377 inomove[i] = i;
1378 }
1379 /*
1380 * Flush all dirtied inodes to disk. Scans the inode flags array; for
1381 * each dirty inode, it sets the BDIRTY bit on the first inode in the
1382 * block containing the dirty inode. Then it scans by blocks, and for
1383 * each marked block, writes it.
1384 */
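/*
 * Example of the masking below (illustrative geometry): with 8192-byte
 * blocks INOPB(fs) is 64, so m starts out as 63; a dirty inode 200 sets
 * IF_BDIRTY on inode 192 (200 & ~63), and the second pass then writes
 * the block holding inodes 192..255 in a single writeat().
 */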
1385 static void
1386 flush_inodes(void)
1387 {
1388 int i;
1389 int ni;
1390 int m;
1391
1392 ni = newsb->fs_ipg * newsb->fs_ncg;
1393 m = INOPB(newsb) - 1;
1394 for (i = 0; i < ni; i++) {
1395 if (iflags[i] & IF_DIRTY) {
1396 iflags[i & ~m] |= IF_BDIRTY;
1397 }
1398 }
1399 m++;
1400 for (i = 0; i < ni; i += m) {
1401 if (iflags[i] & IF_BDIRTY) {
1402 writeat(fsbtodb(newsb, ino_to_fsba(newsb, i)),
1403 inodes + i, newsb->fs_bsize);
1404 }
1405 }
1406 }
1407 /*
1408 * Evict all inodes from the specified cg. shrink() already checked
1409 * that there were enough free inodes, so the no-free-inodes check is
1410 * a can't-happen. If it does trip, the filesystem should be in good
1411 * enough shape for fsck to fix; see the comment on perform_data_move
1412 * for the considerations in question.
1413 */
1414 static void
1415 evict_inodes(struct cg * cg)
1416 {
1417 int inum;
1418 int i;
1419 int fi;
1420
1421 inum = newsb->fs_ipg * cg->cg_cgx;
1422 for (i = 0; i < newsb->fs_ipg; i++, inum++) {
1423 if (inodes[inum].di_mode != 0) {
1424 fi = find_freeinode();
1425 if (fi < 0) {
1426 printf("Sorry, inodes evaporated - "
1427 "filesystem probably needs fsck\n");
1428 exit(1);
1429 }
1430 inomove[inum] = fi;
1431 clr_bits(cg_inosused(cg, 0), i, 1);
1432 set_bits(cg_inosused(cgs[ino_to_cg(newsb, fi)], 0),
1433 fi % newsb->fs_ipg, 1);
1434 }
1435 }
1436 }
1437 /*
1438 * Move inodes from old locations to new. Does not actually write
1439 * anything to disk; just copies in-core and sets dirty bits.
1440 *
1441 * We have to be careful here for reasons similar to those mentioned in
1442 * the comment header on perform_data_move, above: for the sake of
1443 * crash tolerance, we want to make sure everything is present at both
1444 * old and new locations before we update pointers. So we call this
1445 * first, then flush_inodes() to get them out on disk, then update
1446 * directories to match.
1447 */
1448 static void
1449 perform_inode_move(void)
1450 {
1451 int i;
1452 int ni;
1453
1454 ni = oldsb->fs_ipg * oldsb->fs_ncg;
1455 for (i = 0; i < ni; i++) {
1456 if (inomove[i] != i) {
1457 inodes[inomove[i]] = inodes[i];
1458 iflags[inomove[i]] = iflags[i] | IF_DIRTY;
1459 }
1460 }
1461 }
1462 /*
1463 * Update the directory contained in the nb bytes at buf, to point to
1464 * inodes' new locations.
1465 */
1466 static int
1467 update_dirents(char *buf, int nb)
1468 {
1469 int rv;
1470 #define d ((struct direct *)buf)
1471
1472 rv = 0;
1473 while (nb > 0) {
1474 if (inomove[d->d_ino] != d->d_ino) {
1475 rv++;
1476 d->d_ino = inomove[d->d_ino];
1477 }
1478 nb -= d->d_reclen;
1479 buf += d->d_reclen;
1480 }
1481 return (rv);
1482 #undef d
1483 }
1484 /*
1485 * Callback function for map_inode_data_blocks, for updating a
1486 * directory to point to new inode locations.
1487 */
1488 static void
1489 update_dir_data(unsigned int bn, unsigned int size, unsigned int nb, int kind)
1490 {
1491 if (kind == MDB_DATA) {
1492 union {
1493 struct direct d;
1494 char ch[MAXBSIZE];
1495 } buf;
1496 readat(fsbtodb(oldsb, bn), &buf, size << oldsb->fs_fshift);
1497 if (update_dirents((char *) &buf, nb)) {
1498 writeat(fsbtodb(oldsb, bn), &buf,
1499 size << oldsb->fs_fshift);
1500 }
1501 }
1502 }
1503 static void
1504 dirmove_callback(struct ufs1_dinode * di, unsigned int inum, void *arg)
1505 {
1506 switch (di->di_mode & IFMT) {
1507 case IFDIR:
1508 map_inode_data_blocks(di, &update_dir_data);
1509 break;
1510 }
1511 }
1512 /*
1513 * Update directory entries to point to new inode locations.
1514 */
1515 static void
1516 update_for_inode_move(void)
1517 {
1518 map_inodes(&dirmove_callback, newsb->fs_ncg, NULL);
1519 }
1520 /*
1521 * Shrink the filesystem.
1522 */
1523 static void
1524 shrink(void)
1525 {
1526 int i;
1527
1528 /* Load the inodes off disk - we'll need 'em. */
1529 loadinodes();
1530 /* Update the timestamp. */
1531 newsb->fs_time = timestamp();
1532 /* Update the size figures. */
1533 newsb->fs_size = dbtofsb(newsb, newsize);
1534 newsb->fs_old_ncyl = (newsb->fs_size * NSPF(newsb)) / newsb->fs_old_spc;
1535 newsb->fs_ncg = howmany(newsb->fs_old_ncyl, newsb->fs_old_cpg);
1536 /* Does the (new) last cg end before the end of its inode area? See
1537 * the similar code in grow() for more on this. */
1538 if (cgdmin(newsb, newsb->fs_ncg - 1) > newsb->fs_size) {
1539 newsb->fs_ncg--;
1540 newsb->fs_old_ncyl = newsb->fs_ncg * newsb->fs_old_cpg;
1541 newsb->fs_size = (newsb->fs_old_ncyl * newsb->fs_old_spc) / NSPF(newsb);
1542 printf("Warning: last cylinder group is too small;\n");
1543 printf(" dropping it. New size = %lu.\n",
1544 (unsigned long int) fsbtodb(newsb, newsb->fs_size));
1545 }
1546 /* Let's make sure we're not being shrunk into oblivion. */
1547 if (newsb->fs_ncg < 1) {
1548 printf("Size too small - filesystem would have no cylinders\n");
1549 exit(1);
1550 }
1551 /* Initialize for block motion. */
1552 blkmove_init();
1553 /* Update csum size, then fix up for the new size */
1554 newsb->fs_cssize = fragroundup(newsb,
1555 newsb->fs_ncg * sizeof(struct csum));
1556 csum_fixup();
1557 	/* Evict data from any cgs being wholly eliminated */
1558 for (i = newsb->fs_ncg; i < oldsb->fs_ncg; i++) {
1559 int base;
1560 int dlow;
1561 int dhigh;
1562 int dmax;
1563 base = cgbase(oldsb, i);
1564 dlow = cgsblock(oldsb, i) - base;
1565 dhigh = cgdmin(oldsb, i) - base;
1566 dmax = oldsb->fs_size - base;
1567 if (dmax > cgs[i]->cg_ndblk)
1568 dmax = cgs[i]->cg_ndblk;
1569 evict_data(cgs[i], 0, dlow);
1570 evict_data(cgs[i], dhigh, dmax - dhigh);
1571 newsb->fs_cstotal.cs_ndir -= cgs[i]->cg_cs.cs_ndir;
1572 newsb->fs_cstotal.cs_nifree -= cgs[i]->cg_cs.cs_nifree;
1573 newsb->fs_cstotal.cs_nffree -= cgs[i]->cg_cs.cs_nffree;
1574 newsb->fs_cstotal.cs_nbfree -= cgs[i]->cg_cs.cs_nbfree;
1575 }
1576 /* Update the new last cg. */
1577 cgs[newsb->fs_ncg - 1]->cg_ndblk = newsb->fs_size -
1578 ((newsb->fs_ncg - 1) * newsb->fs_fpg);
1579 /* Is the new last cg partial? If so, evict any data from the part
1580 * being shrunken away. */
1581 if (newsb->fs_size % newsb->fs_fpg) {
1582 struct cg *cg;
1583 int oldcgsize;
1584 int newcgsize;
1585 cg = cgs[newsb->fs_ncg - 1];
1586 newcgsize = newsb->fs_size % newsb->fs_fpg;
1587 		oldcgsize = oldsb->fs_size - ((newsb->fs_ncg - 1) * oldsb->fs_fpg);
1588 if (oldcgsize > oldsb->fs_fpg)
1589 oldcgsize = oldsb->fs_fpg;
1590 evict_data(cg, newcgsize, oldcgsize - newcgsize);
1591 clr_bits(cg_blksfree(cg, 0), newcgsize, oldcgsize - newcgsize);
1592 }
1593 /* Find out whether we would run out of inodes. (Note we haven't
1594 * actually done anything to the filesystem yet; all those evict_data
1595 * calls just update blkmove.) */
1596 {
1597 int slop;
1598 slop = 0;
1599 for (i = 0; i < newsb->fs_ncg; i++)
1600 slop += cgs[i]->cg_cs.cs_nifree;
1601 for (; i < oldsb->fs_ncg; i++)
1602 slop -= oldsb->fs_ipg - cgs[i]->cg_cs.cs_nifree;
1603 if (slop < 0) {
1604 printf("Sorry, would run out of inodes\n");
1605 exit(1);
1606 }
1607 }
1608 /* Copy data, then update pointers to data. See the comment header on
1609 * perform_data_move for ordering considerations. */
1610 perform_data_move();
1611 update_for_data_move();
1612 /* Now do inodes. Initialize, evict, move, update - see the comment
1613 * header on perform_inode_move. */
1614 inomove_init();
1615 for (i = newsb->fs_ncg; i < oldsb->fs_ncg; i++)
1616 evict_inodes(cgs[i]);
1617 perform_inode_move();
1618 flush_inodes();
1619 update_for_inode_move();
1620 /* Recompute all the bitmaps; most of them probably need it anyway,
1621 * the rest are just paranoia and not wanting to have to bother
1622 * keeping track of exactly which ones require it. */
1623 for (i = 0; i < newsb->fs_ncg; i++)
1624 cgflags[i] |= CGF_DIRTY | CGF_BLKMAPS | CGF_INOMAPS;
1625 	/* Update the cg_old_ncyl value for the last cylinder group.  The condition is
1626 * commented out because fsck whines if not - see the similar
1627 * condition in grow() for more. */
1628 /* XXX fix once fsck is fixed */
1629 /* if (newsb->fs_old_ncyl % newsb->fs_old_cpg) XXX */
1630 /*XXXJTK*/
1631 cgs[newsb->fs_ncg - 1]->cg_old_ncyl =
1632 newsb->fs_old_ncyl % newsb->fs_old_cpg;
1633 /* Make fs_dsize match the new reality. */
1634 recompute_fs_dsize();
1635 }
1636 /*
1637 * Recompute the block totals, block cluster summaries, and rotational
1638 * position summaries, for a given cg (specified by number), based on
1639 * its free-frag bitmap (cg_blksfree()[]).
1640 */
1641 static void
1642 rescan_blkmaps(int cgn)
1643 {
1644 struct cg *cg;
1645 int f;
1646 int b;
1647 int blkfree;
1648 int blkrun;
1649 int fragrun;
1650 int fwb;
1651
1652 cg = cgs[cgn];
1653 /* Subtract off the current totals from the sb's summary info */
1654 newsb->fs_cstotal.cs_nffree -= cg->cg_cs.cs_nffree;
1655 newsb->fs_cstotal.cs_nbfree -= cg->cg_cs.cs_nbfree;
1656 /* Clear counters and bitmaps. */
1657 cg->cg_cs.cs_nffree = 0;
1658 cg->cg_cs.cs_nbfree = 0;
1659 bzero(&cg->cg_frsum[0], MAXFRAG * sizeof(cg->cg_frsum[0]));
1660 bzero(&cg_blktot(cg, 0)[0],
1661 newsb->fs_old_cpg * sizeof(cg_blktot(cg, 0)[0]));
1662 bzero(&cg_blks(newsb, cg, 0, 0)[0],
1663 newsb->fs_old_cpg * newsb->fs_old_nrpos *
1664 sizeof(cg_blks(newsb, cg, 0, 0)[0]));
1665 if (newsb->fs_contigsumsize > 0) {
1666 cg->cg_nclusterblks = cg->cg_ndblk / newsb->fs_frag;
1667 bzero(&cg_clustersum(cg, 0)[1],
1668 newsb->fs_contigsumsize *
1669 sizeof(cg_clustersum(cg, 0)[1]));
1670 bzero(&cg_clustersfree(cg, 0)[0],
1671 howmany((newsb->fs_old_cpg * newsb->fs_old_spc) / NSPB(newsb),
1672 NBBY));
1673 }
1674 /* Scan the free-frag bitmap. Runs of free frags are kept track of
1675 * with fragrun, and recorded into cg_frsum[] and cg_cs.cs_nffree; on
1676 * each block boundary, entire free blocks are recorded as well. */
1677 blkfree = 1;
1678 blkrun = 0;
1679 fragrun = 0;
1680 f = 0;
1681 b = 0;
1682 fwb = 0;
1683 while (f < cg->cg_ndblk) {
1684 if (bit_is_set(cg_blksfree(cg, 0), f)) {
1685 fragrun++;
1686 } else {
1687 blkfree = 0;
1688 if (fragrun > 0) {
1689 cg->cg_frsum[fragrun]++;
1690 cg->cg_cs.cs_nffree += fragrun;
1691 }
1692 fragrun = 0;
1693 }
1694 f++;
1695 fwb++;
1696 if (fwb >= newsb->fs_frag) {
1697 if (blkfree) {
1698 cg->cg_cs.cs_nbfree++;
1699 if (newsb->fs_contigsumsize > 0)
1700 set_bits(cg_clustersfree(cg, 0), b, 1);
1701 cg_blktot(cg, 0)[cbtocylno(newsb, f - newsb->fs_frag)]++;
1702 cg_blks(newsb, cg,
1703 cbtocylno(newsb, f - newsb->fs_frag),
1704 0)[cbtorpos(newsb, f - newsb->fs_frag)]++;
1705 blkrun++;
1706 } else {
1707 if (fragrun > 0) {
1708 cg->cg_frsum[fragrun]++;
1709 cg->cg_cs.cs_nffree += fragrun;
1710 }
1711 if (newsb->fs_contigsumsize > 0) {
1712 if (blkrun > 0) {
1713 cg_clustersum(cg, 0)[(blkrun > newsb->fs_contigsumsize) ? newsb->fs_contigsumsize : blkrun]++;
1714 }
1715 }
1716 blkrun = 0;
1717 }
1718 fwb = 0;
1719 b++;
1720 blkfree = 1;
1721 fragrun = 0;
1722 }
1723 }
1724 if (fragrun > 0) {
1725 cg->cg_frsum[fragrun]++;
1726 cg->cg_cs.cs_nffree += fragrun;
1727 }
1728 if ((blkrun > 0) && (newsb->fs_contigsumsize > 0)) {
1729 cg_clustersum(cg, 0)[(blkrun > newsb->fs_contigsumsize) ?
1730 newsb->fs_contigsumsize : blkrun]++;
1731 }
1732 /*
1733 * Put the updated summary info back into csums, and add it
1734 * back into the sb's summary info. Then mark the cg dirty.
1735 */
1736 csums[cgn] = cg->cg_cs;
1737 newsb->fs_cstotal.cs_nffree += cg->cg_cs.cs_nffree;
1738 newsb->fs_cstotal.cs_nbfree += cg->cg_cs.cs_nbfree;
1739 cgflags[cgn] |= CGF_DIRTY;
1740 }
1741 /*
1742 * Recompute the cg_inosused()[] bitmap, and the cs_nifree and cs_ndir
1743 * values, for a cg, based on the in-core inodes for that cg.
1744 */
1745 static void
1746 rescan_inomaps(int cgn)
1747 {
1748 struct cg *cg;
1749 int inum;
1750 int iwc;
1751
1752 cg = cgs[cgn];
1753 newsb->fs_cstotal.cs_ndir -= cg->cg_cs.cs_ndir;
1754 newsb->fs_cstotal.cs_nifree -= cg->cg_cs.cs_nifree;
1755 cg->cg_cs.cs_ndir = 0;
1756 cg->cg_cs.cs_nifree = 0;
1757 bzero(&cg_inosused(cg, 0)[0], howmany(newsb->fs_ipg, NBBY));
1758 inum = cgn * newsb->fs_ipg;
1759 if (cgn == 0) {
1760 set_bits(cg_inosused(cg, 0), 0, 2);
1761 iwc = 2;
1762 inum += 2;
1763 } else {
1764 iwc = 0;
1765 }
1766 for (; iwc < newsb->fs_ipg; iwc++, inum++) {
1767 switch (inodes[inum].di_mode & IFMT) {
1768 case 0:
1769 cg->cg_cs.cs_nifree++;
1770 break;
1771 case IFDIR:
1772 cg->cg_cs.cs_ndir++;
1773 /* fall through */
1774 default:
1775 set_bits(cg_inosused(cg, 0), iwc, 1);
1776 break;
1777 }
1778 }
1779 csums[cgn] = cg->cg_cs;
1780 newsb->fs_cstotal.cs_ndir += cg->cg_cs.cs_ndir;
1781 newsb->fs_cstotal.cs_nifree += cg->cg_cs.cs_nifree;
1782 cgflags[cgn] |= CGF_DIRTY;
1783 }
1784 /*
1785 * Flush cgs to disk, recomputing anything they're marked as needing.
1786 */
1787 static void
1788 flush_cgs(void)
1789 {
1790 int i;
1791
1792 for (i = 0; i < newsb->fs_ncg; i++) {
1793 if (cgflags[i] & CGF_BLKMAPS) {
1794 rescan_blkmaps(i);
1795 }
1796 if (cgflags[i] & CGF_INOMAPS) {
1797 rescan_inomaps(i);
1798 }
1799 if (cgflags[i] & CGF_DIRTY) {
1800 cgs[i]->cg_rotor = 0;
1801 cgs[i]->cg_frotor = 0;
1802 cgs[i]->cg_irotor = 0;
1803 writeat(fsbtodb(newsb, cgtod(newsb, i)), cgs[i],
1804 cgblksz);
1805 }
1806 }
1807 writeat(fsbtodb(newsb, newsb->fs_csaddr), csums, newsb->fs_cssize);
1808 }
1809 /*
1810 * Write the superblock, both to the main superblock and to each cg's
1811 * alternative superblock.
1812 */
1813 static void
1814 write_sbs(void)
1815 {
1816 int i;
1817
1818 writeat(where, newsb, SBLOCKSIZE);
1819 for (i = 0; i < newsb->fs_ncg; i++) {
1820 writeat(fsbtodb(newsb, cgsblock(newsb, i)), newsb, SBLOCKSIZE);
1821 }
1822 }
1823 /*
1824 * main().
1825 */
1826 int main(int, char **);
1827 int
1828 main(int ac, char **av)
1829 {
1830 size_t i;
1831 if (ac != 3) {
1832 fprintf(stderr, "usage: %s filesystem new-size\n",
1833 getprogname());
1834 exit(1);
1835 }
1836 fd = open(av[1], O_RDWR, 0);
1837 if (fd < 0)
1838 err(1, "Cannot open `%s'", av[1]);
1839 checksmallio();
1840 newsize = atoi(av[2]);
1841 oldsb = (struct fs *) & sbbuf;
1842 newsb = (struct fs *) (SBLOCKSIZE + (char *) &sbbuf);
1843 for (where = search[i = 0]; search[i] != -1; where = search[++i]) {
1844 readat(where, oldsb, SBLOCKSIZE);
1845 if (oldsb->fs_magic == FS_UFS1_MAGIC)
1846 break;
1847 }
1848 if (where == (off_t)-1)
1849 errx(1, "Bad magic number");
1850 oldsb->fs_qbmask = ~(int64_t) oldsb->fs_bmask;
1851 oldsb->fs_qfmask = ~(int64_t) oldsb->fs_fmask;
1852 if (oldsb->fs_ipg % INOPB(oldsb)) {
1853 printf("ipg[%d] %% INOPB[%d] != 0\n", (int) oldsb->fs_ipg,
1854 (int) INOPB(oldsb));
1855 exit(1);
1856 }
1857 /* The superblock is bigger than struct fs (there are trailing tables,
1858 * of non-fixed size); make sure we copy the whole thing. SBLOCKSIZE may
1859 * be an over-estimate, but we do this just once, so being generous is
1860 * cheap. */
1861 bcopy(oldsb, newsb, SBLOCKSIZE);
1862 loadcgs();
1863 if (newsize > fsbtodb(oldsb, oldsb->fs_size)) {
1864 grow();
1865 } else if (newsize < fsbtodb(oldsb, oldsb->fs_size)) {
1866 shrink();
1867 }
1868 flush_cgs();
1869 write_sbs();
1870 exit(0);
1871 }
1872