1 /*	$NetBSD: resize_ffs.c,v 1.29 2011/08/15 00:30:25 dholland Exp $	*/
2 /* From sources sent on February 17, 2003 */
3 /*-
4 * As its sole author, I explicitly place this code in the public
5 * domain. Anyone may use it for any purpose (though I would
6 * appreciate credit where it is due).
7 *
8 * der Mouse
9 *
10 * mouse (at) rodents.montreal.qc.ca
11 * 7D C8 61 52 5D E7 2D 39 4E F1 31 3E E8 B3 27 4B
12 */
13 /*
14 * resize_ffs:
15 *
16 * Resize a file system. Is capable of both growing and shrinking.
17 *
18 * Usage: resize_ffs [-s newsize] [-y] file_system
19 *
20 * Example: resize_ffs -s 29574 /dev/rsd1e
21 *
22 * newsize is in DEV_BSIZE units (ie, disk sectors, usually 512 bytes
23 * each).
24 *
25 * Note: this currently requires gcc to build, since it is written
26 * depending on gcc-specific features, notably nested function
27 * definitions (which in at least a few cases depend on the lexical
28 * scoping gcc provides, so they can't be trivially moved outside).
29 *
30 * Many thanks go to John Kohl <jtk (at) NetBSD.org> for finding bugs: the
31 * one responsible for the "realloccgblk: can't find blk in cyl"
32 * problem and a more minor one which left fs_dsize wrong when
33 * shrinking. (These actually indicate bugs in fsck too - it should
34 * have caught and fixed them.)
35 *
36 */
37
38 #include <sys/cdefs.h>
39 __RCSID("$NetBSD: resize_ffs.c,v 1.29 2011/08/15 00:30:25 dholland Exp $");
40
41 #include <sys/disk.h>
42 #include <sys/disklabel.h>
43 #include <sys/dkio.h>
44 #include <sys/ioctl.h>
45 #include <sys/stat.h>
46 #include <sys/mman.h>
47 #include <sys/param.h> /* MAXFRAG */
48 #include <ufs/ffs/fs.h>
49 #include <ufs/ffs/ffs_extern.h>
50 #include <ufs/ufs/dir.h>
51 #include <ufs/ufs/dinode.h>
52 #include <ufs/ufs/ufs_bswap.h> /* ufs_rw32 */
53
54 #include <err.h>
55 #include <errno.h>
56 #include <fcntl.h>
57 #include <stdio.h>
58 #include <stdlib.h>
59 #include <strings.h>
60 #include <unistd.h>
61
62 /* new size of file system, in sectors */
63 static uint64_t newsize;
64
65 /* fd open onto disk device or file */
66 static int fd;
67
68 /* must we break up big I/O operations - see checksmallio() */
69 static int smallio;
70
71 /* size of a cg, in bytes, rounded up to a frag boundary */
72 static int cgblksz;
73
74 /* possible superblock locations */
75 static int search[] = SBLOCKSEARCH;
76 /* location of the superblock */
77 static off_t where;
78
79 /* Superblocks. */
80 static struct fs *oldsb; /* before we started */
81 static struct fs *newsb; /* copy to work with */
82 /* Buffer to hold the above. Make sure it's aligned correctly. */
83 static char sbbuf[2 * SBLOCKSIZE]
84 __attribute__((__aligned__(__alignof__(struct fs))));
85
86 union dinode {
87 struct ufs1_dinode dp1;
88 struct ufs2_dinode dp2;
89 };
90 #define DIP(dp, field) \
91 ((is_ufs2) ? \
92 (dp)->dp2.field : (dp)->dp1.field)
93
94 #define DIP_ASSIGN(dp, field, value) \
95 do { \
96 if (is_ufs2) \
97 (dp)->dp2.field = (value); \
98 else \
99 (dp)->dp1.field = (value); \
100 } while (0)
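/*
 * Usage sketch (illustrative only): these macros hide the UFS1/UFS2
 * dinode layout difference behind the global is_ufs2 flag, e.g.
 *
 *	off_t sz = DIP(dp, di_size);		read a field
 *	DIP_ASSIGN(dp, di_size, sz);		write a field
 *
 * where dp is a union dinode *.
 */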
101
102 /* a cg's worth of brand new squeaky-clean inodes */
103 static struct ufs1_dinode *zinodes;
104
105 /* pointers to the in-core cgs, read off disk and possibly modified */
106 static struct cg **cgs;
107
108 /* pointer to csum array - the stuff pointed to on-disk by fs_csaddr */
109 static struct csum *csums;
110
111 /* per-cg flags, indexed by cg number */
112 static unsigned char *cgflags;
113 #define CGF_DIRTY 0x01 /* needs to be written to disk */
114 #define CGF_BLKMAPS 0x02 /* block bitmaps need rebuilding */
115 #define CGF_INOMAPS 0x04 /* inode bitmaps need rebuilding */
116
117 /* when shrinking, these two arrays record how we want blocks to move. */
118 /* if blkmove[i] is j, the frag that started out as frag #i should end */
119 /* up as frag #j. inomove[i]=j means, similarly, that the inode that */
120 /* started out as inode i should end up as inode j. */
121 static unsigned int *blkmove;
122 static unsigned int *inomove;
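/*
 * Worked example (illustrative): if frags 100..103 must vacate a cg
 * that is being cut off and frags 40..43 are free elsewhere, then
 * mark_move(100, 40, 4) records blkmove[100]=40 ... blkmove[103]=43,
 * while frags that stay put keep blkmove[i]==i.  perform_data_move()
 * later copies the data and update_for_data_move() rewrites the
 * affected block pointers to match.
 */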
123
124 /* in-core copies of all inodes in the fs, indexed by inumber */
125 union dinode *inodes;
126
127 void *ibuf; /* ptr to fs block-sized buffer for reading/writing inodes */
128
129 /* byteswapped inodes */
130 union dinode *sinodes;
131
132 /* per-inode flags, indexed by inumber */
133 static unsigned char *iflags;
134 #define IF_DIRTY 0x01 /* needs to be written to disk */
135 #define IF_BDIRTY 0x02 /* like DIRTY, but is set on first inode in a
136 * block of inodes, and applies to the whole
137 * block. */
138
139 /* resize_ffs works directly on dinodes, adapt blksize() */
140 #define dblksize(fs, dip, lbn) \
141 (((lbn) >= NDADDR || DIP((dip), di_size) >= lblktosize(fs, (lbn) + 1)) \
142 ? (fs)->fs_bsize \
143 : (fragroundup(fs, blkoff(fs, DIP((dip), di_size)))))
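/*
 * Worked example (illustrative, assuming fs_bsize 16384 and fs_fsize
 * 2048): for a 20000-byte file, dblksize() yields 16384 for lbn 0 (a
 * full block) but only fragroundup(fs, 20000 % 16384) = 4096 for
 * lbn 1, since the last block of the file occupies only two frags.
 */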
144
145
146 /*
147 * Number of disk sectors per block/fragment
148 */
149 #define NSPB(fs) (fsbtodb((fs),1) << (fs)->fs_fragshift)
150 #define NSPF(fs) (fsbtodb((fs),1))
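/*
 * For illustration, with the common 16384/2048 block/frag layout and
 * DEV_BSIZE 512: fsbtodb(fs, 1) is 4, so NSPF(fs) is 4 sectors per
 * frag and NSPB(fs) is 4 << 3 = 32 sectors per block.
 */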
151
152 /* global flags */
153 int is_ufs2 = 0;
154 int needswap = 0;
155
156 static void usage(void) __dead;
157
158 /*
159 * See if we need to break up large I/O operations. This should never
160 * be needed, but under at least one <version,platform> combination,
161 * large enough disk transfers to the raw device hang. So if we're
162 * talking to a character special device, play it safe; in this case,
163 * readat() and writeat() break everything up into pieces no larger
164 * than 8K, doing multiple syscalls for larger operations.
165 */
166 static void
167 checksmallio(void)
168 {
169 struct stat stb;
170
171 fstat(fd, &stb);
172 smallio = ((stb.st_mode & S_IFMT) == S_IFCHR);
173 }
174
175 static int
176 isplainfile(void)
177 {
178 struct stat stb;
179
180 fstat(fd, &stb);
181 return S_ISREG(stb.st_mode);
182 }
183 /*
184 * Read size bytes starting at blkno into buf. blkno is in DEV_BSIZE
185 * units, ie, after fsbtodb(); size is in bytes.
186 */
187 static void
188 readat(off_t blkno, void *buf, int size)
189 {
190 /* Seek to the correct place. */
191 if (lseek(fd, blkno * DEV_BSIZE, L_SET) < 0)
192 err(EXIT_FAILURE, "lseek failed");
193
194 /* See if we have to break up the transfer... */
195 if (smallio) {
196 char *bp; /* pointer into buf */
197 int left; /* bytes left to go */
198 int n; /* number to do this time around */
199 int rv; /* syscall return value */
200 bp = buf;
201 left = size;
202 while (left > 0) {
203 n = (left > 8192) ? 8192 : left;
204 rv = read(fd, bp, n);
205 if (rv < 0)
206 err(EXIT_FAILURE, "read failed");
207 if (rv != n)
208 errx(EXIT_FAILURE,
209 "read: wanted %d, got %d", n, rv);
210 bp += n;
211 left -= n;
212 }
213 } else {
214 int rv;
215 rv = read(fd, buf, size);
216 if (rv < 0)
217 err(EXIT_FAILURE, "read failed");
218 if (rv != size)
219 errx(EXIT_FAILURE, "read: wanted %d, got %d",
220 size, rv);
221 }
222 }
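/*
 * Typical call (illustrative): to read a single frag, convert the
 * frag number to DEV_BSIZE units first, as in
 *	readat(fsbtodb(fs, fragno), buf, fs->fs_fsize);
 */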
223 /*
224 * Write size bytes from buf starting at blkno. blkno is in DEV_BSIZE
225 * units, ie, after fsbtodb(); size is in bytes.
226 */
227 static void
228 writeat(off_t blkno, const void *buf, int size)
229 {
230 /* Seek to the correct place. */
231 if (lseek(fd, blkno * DEV_BSIZE, L_SET) < 0)
232 err(EXIT_FAILURE, "lseek failed");
233 /* See if we have to break up the transfer... */
234 if (smallio) {
235 const char *bp; /* pointer into buf */
236 int left; /* bytes left to go */
237 int n; /* number to do this time around */
238 int rv; /* syscall return value */
239 bp = buf;
240 left = size;
241 while (left > 0) {
242 n = (left > 8192) ? 8192 : left;
243 rv = write(fd, bp, n);
244 if (rv < 0)
245 err(EXIT_FAILURE, "write failed");
246 if (rv != n)
247 errx(EXIT_FAILURE,
248 "write: wanted %d, got %d", n, rv);
249 bp += n;
250 left -= n;
251 }
252 } else {
253 int rv;
254 rv = write(fd, buf, size);
255 if (rv < 0)
256 err(EXIT_FAILURE, "write failed");
257 if (rv != size)
258 errx(EXIT_FAILURE,
259 "write: wanted %d, got %d", size, rv);
260 }
261 }
262 /*
263 * Never-fail versions of malloc() and realloc(), and an allocation
264 * routine (which also never fails) for allocating memory that will
265 * never be freed until exit.
266 */
267
268 /*
269 * Never-fail malloc.
270 */
271 static void *
272 nfmalloc(size_t nb, const char *tag)
273 {
274 void *rv;
275
276 rv = malloc(nb);
277 if (rv)
278 return (rv);
279 err(EXIT_FAILURE, "Can't allocate %lu bytes for %s",
280 (unsigned long int) nb, tag);
281 }
282 /*
283 * Never-fail realloc.
284 */
285 static void *
286 nfrealloc(void *blk, size_t nb, const char *tag)
287 {
288 void *rv;
289
290 rv = realloc(blk, nb);
291 if (rv)
292 return (rv);
293 err(EXIT_FAILURE, "Can't re-allocate %lu bytes for %s",
294 (unsigned long int) nb, tag);
295 }
296 /*
297 * Allocate memory that will never be freed or reallocated. Arguably
298 * this routine should handle small allocations by chopping up pages,
299 * but that's not worth the bother; it's not called more than a
300 * handful of times per run, and if the allocations are that small the
301 * waste in giving each one its own page is ignorable.
302 */
303 static void *
304 alloconce(size_t nb, const char *tag)
305 {
306 void *rv;
307
308 rv = mmap(0, nb, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
309 if (rv != MAP_FAILED)
310 return (rv);
311 err(EXIT_FAILURE, "Can't map %lu bytes for %s",
312 (unsigned long int) nb, tag);
313 }
314 /*
315 * Load the cgs and csums off disk. Also allocates the space to load
316 * them into and initializes the per-cg flags.
317 */
318 static void
319 loadcgs(void)
320 {
321 int cg;
322 char *cgp;
323
324 cgblksz = roundup(oldsb->fs_cgsize, oldsb->fs_fsize);
325 cgs = nfmalloc(oldsb->fs_ncg * sizeof(struct cg *), "cg pointers");
326 cgp = alloconce(oldsb->fs_ncg * cgblksz, "cgs");
327 cgflags = nfmalloc(oldsb->fs_ncg, "cg flags");
328 csums = nfmalloc(oldsb->fs_cssize, "cg summary");
329 for (cg = 0; cg < oldsb->fs_ncg; cg++) {
330 cgs[cg] = (struct cg *) cgp;
331 readat(fsbtodb(oldsb, cgtod(oldsb, cg)), cgp, cgblksz);
332 if (needswap)
333 ffs_cg_swap(cgs[cg],cgs[cg],oldsb);
334 cgflags[cg] = 0;
335 cgp += cgblksz;
336 }
337 readat(fsbtodb(oldsb, oldsb->fs_csaddr), csums, oldsb->fs_cssize);
338 if (needswap)
339 ffs_csum_swap(csums,csums,oldsb->fs_cssize);
340 }
341 /*
342 * Set n bits, starting with bit #base, in the bitmap pointed to by
343 * bitvec (which is assumed to be large enough to include bits base
344 * through base+n-1).
345 */
346 static void
347 set_bits(unsigned char *bitvec, unsigned int base, unsigned int n)
348 {
349 if (n < 1)
350 return; /* nothing to do */
351 if (base & 7) { /* partial byte at beginning */
352 if (n <= 8 - (base & 7)) { /* entirely within one byte */
353 bitvec[base >> 3] |= (~((~0U) << n)) << (base & 7);
354 return;
355 }
356 bitvec[base >> 3] |= (~0U) << (base & 7);
357 n -= 8 - (base & 7);
358 base = (base & ~7) + 8;
359 }
360 if (n >= 8) { /* do full bytes */
361 memset(bitvec + (base >> 3), 0xff, n >> 3);
362 base += n & ~7;
363 n &= 7;
364 }
365 if (n) { /* partial byte at end */
366 bitvec[base >> 3] |= ~((~0U) << n);
367 }
368 }
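/*
 * Worked example (illustrative): set_bits(vec, 10, 4) touches only
 * vec[1], or-ing in mask 0x3c (bits 2..5 of that byte), since map bit
 * i lives in vec[i >> 3] at bit position (i & 7).
 */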
369 /*
370 * Clear n bits, starting with bit #base, in the bitmap pointed to by
371 * bitvec (which is assumed to be large enough to include bits base
372 * through base+n-1). Code parallels set_bits().
373 */
374 static void
375 clr_bits(unsigned char *bitvec, int base, int n)
376 {
377 if (n < 1)
378 return;
379 if (base & 7) {
380 if (n <= 8 - (base & 7)) {
381 bitvec[base >> 3] &= ~((~((~0U) << n)) << (base & 7));
382 return;
383 }
384 bitvec[base >> 3] &= ~((~0U) << (base & 7));
385 n -= 8 - (base & 7);
386 base = (base & ~7) + 8;
387 }
388 if (n >= 8) {
389 memset(bitvec + (base >> 3), 0, n >> 3);
390 base += n & ~7;
391 n &= 7;
392 }
393 if (n) {
394 bitvec[base >> 3] &= (~0U) << n;
395 }
396 }
397 /*
398 * Test whether bit #bit is set in the bitmap pointed to by bitvec.
399 */
400 static int
401 bit_is_set(unsigned char *bitvec, int bit)
402 {
403 return (bitvec[bit >> 3] & (1 << (bit & 7)));
404 }
405 /*
406 * Test whether bit #bit is clear in the bitmap pointed to by bitvec.
407 */
408 static int
409 bit_is_clr(unsigned char *bitvec, int bit)
410 {
411 return (!bit_is_set(bitvec, bit));
412 }
413 /*
414 * Test whether a whole block of bits is set in a bitmap. This is
415 * designed for testing (aligned) disk blocks in a bit-per-frag
416 * bitmap; it has assumptions wired into it based on that, essentially
417 * that the entire block fits into a single byte. This returns true
418 * iff _all_ the bits are set; it is not just the complement of
419 * blk_is_clr on the same arguments (unless blkfrags==1).
420 */
421 static int
422 blk_is_set(unsigned char *bitvec, int blkbase, int blkfrags)
423 {
424 unsigned int mask;
425
426 mask = (~((~0U) << blkfrags)) << (blkbase & 7);
427 return ((bitvec[blkbase >> 3] & mask) == mask);
428 }
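/*
 * E.g. (illustrative), with fs_frag 8 and blkbase 16: the mask is
 * 0xff and blkbase >> 3 is 2, so blk_is_set() reduces to testing
 * bitvec[2] == 0xff, and blk_is_clr() below to bitvec[2] == 0.
 */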
429 /*
430 * Test whether a whole block of bits is clear in a bitmap. See
431 * blk_is_set (above) for assumptions. This returns true iff _all_
432 * the bits are clear; it is not just the complement of blk_is_set on
433 * the same arguments (unless blkfrags==1).
434 */
435 static int
436 blk_is_clr(unsigned char *bitvec, int blkbase, int blkfrags)
437 {
438 unsigned int mask;
439
440 mask = (~((~0U) << blkfrags)) << (blkbase & 7);
441 return ((bitvec[blkbase >> 3] & mask) == 0);
442 }
443 /*
444 * Initialize a new cg. Called when growing. Assumes memory has been
445 * allocated but not otherwise set up. This code sets the fields of
446 * the cg, initializes the bitmaps (and cluster summaries, if
447 * applicable), updates both per-cylinder summary info and the global
448 * summary info in newsb; it also writes out new inodes for the cg.
449 *
450 * This code knows it can never be called for cg 0, which makes it a
451 * bit simpler than it would otherwise be.
452 */
453 static void
454 initcg(int cgn)
455 {
456 struct cg *cg; /* The in-core cg, of course */
457 int base; /* Disk address of cg base */
458 int dlow; /* Size of pre-cg data area */
459 int dhigh; /* Offset of post-inode data area, from base */
460 int dmax; /* Offset of end of post-inode data area */
461 int i; /* Generic loop index */
462 int n; /* Generic count */
463 int start; /* start of cg maps */
464
465 cg = cgs[cgn];
466 /* Place the data areas */
467 base = cgbase(newsb, cgn);
468 dlow = cgsblock(newsb, cgn) - base;
469 dhigh = cgdmin(newsb, cgn) - base;
470 dmax = newsb->fs_size - base;
471 if (dmax > newsb->fs_fpg)
472 dmax = newsb->fs_fpg;
473 start = &cg->cg_space[0] - (unsigned char *) cg;
474 /*
475 * Clear out the cg - assumes all-0-bytes is the correct way
476 * to initialize fields we don't otherwise touch, which is
477 * perhaps not the right thing to do, but it's what fsck and
478 * mkfs do.
479 */
480 memset(cg, 0, newsb->fs_cgsize);
481 if (newsb->fs_old_flags & FS_FLAGS_UPDATED)
482 cg->cg_time = newsb->fs_time;
483 cg->cg_magic = CG_MAGIC;
484 cg->cg_cgx = cgn;
485 cg->cg_niblk = newsb->fs_ipg;
486 cg->cg_ndblk = dmax;
487
488 if (is_ufs2) {
489 cg->cg_time = newsb->fs_time;
490 cg->cg_initediblk = newsb->fs_ipg < 2 * INOPB(newsb) ?
491 newsb->fs_ipg : 2 * INOPB(newsb);
492 cg->cg_iusedoff = start;
493 } else {
494 cg->cg_old_time = newsb->fs_time;
495 cg->cg_old_niblk = cg->cg_niblk;
496 cg->cg_niblk = 0;
497 cg->cg_initediblk = 0;
498
499
500 cg->cg_old_ncyl = newsb->fs_old_cpg;
501 /* Update the cg_old_ncyl value for the last cylinder. */
502 if (cgn == newsb->fs_ncg - 1) {
503 if ((newsb->fs_old_flags & FS_FLAGS_UPDATED) == 0)
504 cg->cg_old_ncyl = newsb->fs_old_ncyl %
505 newsb->fs_old_cpg;
506 }
507
508 /* Set up the bitmap pointers. We have to be careful
509 * to lay out the cg _exactly_ the way mkfs and fsck
510 * do it, since fsck compares the _entire_ cg against
511 * a recomputed cg, and whines if there is any
512 * mismatch, including the bitmap offsets. */
513 /* XXX update this comment when fsck is fixed */
514 cg->cg_old_btotoff = start;
515 cg->cg_old_boff = cg->cg_old_btotoff
516 + (newsb->fs_old_cpg * sizeof(int32_t));
517 cg->cg_iusedoff = cg->cg_old_boff +
518 (newsb->fs_old_cpg * newsb->fs_old_nrpos * sizeof(int16_t));
519 }
520 cg->cg_freeoff = cg->cg_iusedoff + howmany(newsb->fs_ipg, NBBY);
521 if (newsb->fs_contigsumsize > 0) {
522 cg->cg_nclusterblks = cg->cg_ndblk / newsb->fs_frag;
523 cg->cg_clustersumoff = cg->cg_freeoff +
524 howmany(newsb->fs_fpg, NBBY) - sizeof(int32_t);
525 cg->cg_clustersumoff =
526 roundup(cg->cg_clustersumoff, sizeof(int32_t));
527 cg->cg_clusteroff = cg->cg_clustersumoff +
528 ((newsb->fs_contigsumsize + 1) * sizeof(int32_t));
529 cg->cg_nextfreeoff = cg->cg_clusteroff +
530 howmany(fragstoblks(newsb,newsb->fs_fpg), NBBY);
531 n = dlow / newsb->fs_frag;
532 if (n > 0) {
533 set_bits(cg_clustersfree(cg, 0), 0, n);
534 cg_clustersum(cg, 0)[(n > newsb->fs_contigsumsize) ?
535 newsb->fs_contigsumsize : n]++;
536 }
537 } else {
538 cg->cg_nextfreeoff = cg->cg_freeoff +
539 howmany(newsb->fs_fpg, NBBY);
540 }
541 /* Mark the data areas as free; everything else is marked busy by the
542 * memset() up at the top. */
543 set_bits(cg_blksfree(cg, 0), 0, dlow);
544 set_bits(cg_blksfree(cg, 0), dhigh, dmax - dhigh);
545 /* Initialize summary info */
546 cg->cg_cs.cs_ndir = 0;
547 cg->cg_cs.cs_nifree = newsb->fs_ipg;
548 cg->cg_cs.cs_nbfree = dlow / newsb->fs_frag;
549 cg->cg_cs.cs_nffree = 0;
550
551 /* This is the simplest way of doing this; we perhaps could
552 * compute the correct cg_blktot()[] and cg_blks()[] values
553 * other ways, but it would be complicated and hardly seems
554 * worth the effort. (The reason there isn't
555 * frag-at-beginning and frag-at-end code here, like the code
556 * below for the post-inode data area, is that the pre-sb data
557 * area always starts at 0, and thus is block-aligned, and
558 * always ends at the sb, which is block-aligned.) */
559 if ((newsb->fs_old_flags & FS_FLAGS_UPDATED) == 0)
560 for (i = 0; i < dlow; i += newsb->fs_frag) {
561 old_cg_blktot(cg, 0)[old_cbtocylno(newsb, i)]++;
562 old_cg_blks(newsb, cg,
563 old_cbtocylno(newsb, i),
564 0)[old_cbtorpos(newsb, i)]++;
565 }
566
567 /* Deal with a partial block at the beginning of the post-inode area.
568 * I'm not convinced this can happen - I think the inodes are always
569 * block-aligned and always an integral number of blocks - but it's
570 * cheap to do the right thing just in case. */
571 if (dhigh % newsb->fs_frag) {
572 n = newsb->fs_frag - (dhigh % newsb->fs_frag);
573 cg->cg_frsum[n]++;
574 cg->cg_cs.cs_nffree += n;
575 dhigh += n;
576 }
577 n = (dmax - dhigh) / newsb->fs_frag;
578 /* We have n full-size blocks in the post-inode data area. */
579 if (n > 0) {
580 cg->cg_cs.cs_nbfree += n;
581 if (newsb->fs_contigsumsize > 0) {
582 i = dhigh / newsb->fs_frag;
583 set_bits(cg_clustersfree(cg, 0), i, n);
584 cg_clustersum(cg, 0)[(n > newsb->fs_contigsumsize) ?
585 newsb->fs_contigsumsize : n]++;
586 }
587 if (is_ufs2 == 0)
588 for (i = n; i > 0; i--) {
589 old_cg_blktot(cg, 0)[old_cbtocylno(newsb,
590 dhigh)]++;
591 old_cg_blks(newsb, cg,
592 old_cbtocylno(newsb, dhigh),
593 0)[old_cbtorpos(newsb,
594 dhigh)]++;
595 dhigh += newsb->fs_frag;
596 }
597 }
598 if (is_ufs2 == 0) {
599 /* Deal with any leftover frag at the end of the cg. */
600 i = dmax - dhigh;
601 if (i) {
602 cg->cg_frsum[i]++;
603 cg->cg_cs.cs_nffree += i;
604 }
605 }
606 /* Update the csum info. */
607 csums[cgn] = cg->cg_cs;
608 newsb->fs_cstotal.cs_nffree += cg->cg_cs.cs_nffree;
609 newsb->fs_cstotal.cs_nbfree += cg->cg_cs.cs_nbfree;
610 newsb->fs_cstotal.cs_nifree += cg->cg_cs.cs_nifree;
611 if (is_ufs2 == 0)
612 /* Write out the cleared inodes. */
613 writeat(fsbtodb(newsb, cgimin(newsb, cgn)), zinodes,
614 newsb->fs_ipg * sizeof(struct ufs1_dinode));
615 /* Dirty the cg. */
616 cgflags[cgn] |= CGF_DIRTY;
617 }
618 /*
619 * Find free space, at least nfrags consecutive frags of it. Pays no
620 * attention to block boundaries, but refuses to straddle cg
621 * boundaries, even if the disk blocks involved are in fact
622 * consecutive. Return value is the frag number of the first frag of
623 * the block, or -1 if no space was found. Uses newsb for sb values,
624 * and assumes the cgs[] structures correctly describe the area to be
625 * searched.
626 *
627 * XXX is there a bug lurking in the ignoring of block boundaries by
628 * the routine used by fragmove() in evict_data()? Can an end-of-file
629 * frag legally straddle a block boundary? If not, this should be
630 * cloned and fixed to stop at block boundaries for that use. The
631 * current one may still be needed for csum info motion, in case that
632 * takes up more than a whole block (is the csum info allowed to begin
633 * partway through a block and continue into the following block?).
634 *
635 * If we wrap off the end of the file system back to the beginning, we
636 * can end up searching the end of the file system twice. I ignore
637 * this inefficiency, since if that happens we're going to croak with
638 * a no-space error anyway, so it happens at most once.
639 */
640 static int
641 find_freespace(unsigned int nfrags)
642 {
643 static int hand = 0; /* hand rotates through all frags in the fs */
644 int cgsize; /* size of the cg hand currently points into */
645 int cgn; /* number of cg hand currently points into */
646 int fwc; /* frag-within-cg number of frag hand points
647 * to */
648 int run; /* length of run of free frags seen so far */
649 int secondpass; /* have we wrapped from end of fs to
650 * beginning? */
651 unsigned char *bits; /* cg_blksfree()[] for cg hand points into */
652
653 cgn = dtog(newsb, hand);
654 fwc = dtogd(newsb, hand);
655 secondpass = (hand == 0);
656 run = 0;
657 bits = cg_blksfree(cgs[cgn], 0);
658 cgsize = cgs[cgn]->cg_ndblk;
659 while (1) {
660 if (bit_is_set(bits, fwc)) {
661 run++;
662 if (run >= nfrags)
663 return (hand + 1 - run);
664 } else {
665 run = 0;
666 }
667 hand++;
668 fwc++;
669 if (fwc >= cgsize) {
670 fwc = 0;
671 cgn++;
672 if (cgn >= newsb->fs_ncg) {
673 hand = 0;
674 if (secondpass)
675 return (-1);
676 secondpass = 1;
677 cgn = 0;
678 }
679 bits = cg_blksfree(cgs[cgn], 0);
680 cgsize = cgs[cgn]->cg_ndblk;
681 run = 0;
682 }
683 }
684 }
685 /*
686 * Find a free block of disk space. Finds an entire block of frags,
687 * all of which are free. Return value is the frag number of the
688 * first frag of the block, or -1 if no space was found. Uses newsb
689 * for sb values, and assumes the cgs[] structures correctly describe
690 * the area to be searched.
691 *
692 * See find_freespace(), above, for remarks about hand wrapping around.
693 */
694 static int
695 find_freeblock(void)
696 {
697 static int hand = 0; /* hand rotates through all frags in fs */
698 int cgn; /* cg number of cg hand points into */
699 int fwc; /* frag-within-cg number of frag hand points
700 * to */
701 int cgsize; /* size of cg hand points into */
702 int secondpass; /* have we wrapped from end to beginning? */
703 unsigned char *bits; /* cg_blksfree()[] for cg hand points into */
704
705 cgn = dtog(newsb, hand);
706 fwc = dtogd(newsb, hand);
707 secondpass = (hand == 0);
708 bits = cg_blksfree(cgs[cgn], 0);
709 cgsize = blknum(newsb, cgs[cgn]->cg_ndblk);
710 while (1) {
711 if (blk_is_set(bits, fwc, newsb->fs_frag))
712 return (hand);
713 fwc += newsb->fs_frag;
714 hand += newsb->fs_frag;
715 if (fwc >= cgsize) {
716 fwc = 0;
717 cgn++;
718 if (cgn >= newsb->fs_ncg) {
719 hand = 0;
720 if (secondpass)
721 return (-1);
722 secondpass = 1;
723 cgn = 0;
724 }
725 bits = cg_blksfree(cgs[cgn], 0);
726 cgsize = blknum(newsb, cgs[cgn]->cg_ndblk);
727 }
728 }
729 }
730 /*
731 * Find a free inode, returning its inumber or -1 if none was found.
732 * Uses newsb for sb values, and assumes the cgs[] structures
733 * correctly describe the area to be searched.
734 *
735 * See find_freespace(), above, for remarks about hand wrapping around.
736 */
737 static int
738 find_freeinode(void)
739 {
740 static int hand = 0; /* hand rotates through all inodes in fs */
741 int cgn; /* cg number of cg hand points into */
742 int iwc; /* inode-within-cg number of inode hand points
743 * to */
744 int secondpass; /* have we wrapped from end to beginning? */
745 unsigned char *bits; /* cg_inosused()[] for cg hand points into */
746
747 cgn = hand / newsb->fs_ipg;
748 iwc = hand % newsb->fs_ipg;
749 secondpass = (hand == 0);
750 bits = cg_inosused(cgs[cgn], 0);
751 while (1) {
752 if (bit_is_clr(bits, iwc))
753 return (hand);
754 hand++;
755 iwc++;
756 if (iwc >= newsb->fs_ipg) {
757 iwc = 0;
758 cgn++;
759 if (cgn >= newsb->fs_ncg) {
760 hand = 0;
761 if (secondpass)
762 return (-1);
763 secondpass = 1;
764 cgn = 0;
765 }
766 bits = cg_inosused(cgs[cgn], 0);
767 }
768 }
769 }
770 /*
771 * Mark a frag as free. Sets the frag's bit in the cg_blksfree bitmap
772 * for the appropriate cg, and marks the cg as dirty.
773 */
774 static void
775 free_frag(int fno)
776 {
777 int cgn;
778
779 cgn = dtog(newsb, fno);
780 set_bits(cg_blksfree(cgs[cgn], 0), dtogd(newsb, fno), 1);
781 cgflags[cgn] |= CGF_DIRTY | CGF_BLKMAPS;
782 }
783 /*
784 * Allocate a frag. Clears the frag's bit in the cg_blksfree bitmap
785 * for the appropriate cg, and marks the cg as dirty.
786 */
787 static void
788 alloc_frag(int fno)
789 {
790 int cgn;
791
792 cgn = dtog(newsb, fno);
793 clr_bits(cg_blksfree(cgs[cgn], 0), dtogd(newsb, fno), 1);
794 cgflags[cgn] |= CGF_DIRTY | CGF_BLKMAPS;
795 }
796 /*
797 * Fix up the csum array. If shrinking, this involves freeing zero or
798 * more frags; if growing, it involves allocating them, or if the
799 * frags being grown into aren't free, finding space elsewhere for the
800 * csum info. (If the number of occupied frags doesn't change,
801 * nothing happens here.)
802 */
803 static void
804 csum_fixup(void)
805 {
806 int nold; /* # frags in old csum info */
807 int ntot; /* # frags in new csum info */
808 int nnew; /* ntot-nold */
809 int newloc; /* new location for csum info, if necessary */
810 int i; /* generic loop index */
811 int j; /* generic loop index */
812 int f; /* "from" frag number, if moving */
813 int t; /* "to" frag number, if moving */
814 int cgn; /* cg number, used when shrinking */
815
816 ntot = howmany(newsb->fs_cssize, newsb->fs_fsize);
817 nold = howmany(oldsb->fs_cssize, newsb->fs_fsize);
818 nnew = ntot - nold;
819 /* First, if there's no change in frag counts, it's easy. */
820 if (nnew == 0)
821 return;
822 /* Next, if we're shrinking, it's almost as easy. Just free up any
823 * frags in the old area we no longer need. */
824 if (nnew < 0) {
825 for ((i = newsb->fs_csaddr + ntot - 1), (j = nnew);
826 j < 0;
827 i--, j++) {
828 free_frag(i);
829 }
830 return;
831 }
832 /* We must be growing. Check to see that the new csum area fits
833 * within the file system. I think this can never happen, since for
834 * the csum area to grow, we must be adding at least one cg, so the
835 * old csum area can't be this close to the end of the new file system.
836 * But it's a cheap check. */
837 /* XXX what if csum info is at end of cg and grows into next cg, what
838 * if it spills over onto the next cg's backup superblock? Can this
839 * happen? */
840 if (newsb->fs_csaddr + ntot <= newsb->fs_size) {
841 /* Okay, it fits - now, see if the space we want is free. */
842 for ((i = newsb->fs_csaddr + nold), (j = nnew);
843 j > 0;
844 i++, j--) {
845 cgn = dtog(newsb, i);
846 if (bit_is_clr(cg_blksfree(cgs[cgn], 0),
847 dtogd(newsb, i)))
848 break;
849 }
850 if (j <= 0) {
851 /* Win win - all the frags we want are free. Allocate
852 * 'em and we're all done. */
853 for ((i = newsb->fs_csaddr + ntot - nnew),
854 (j = nnew); j > 0; i++, j--) {
855 alloc_frag(i);
856 }
857 return;
858 }
859 }
860 /* We have to move the csum info, sigh. Look for new space, free old
861 * space, and allocate new. Update fs_csaddr. We don't copy anything
862 * on disk at this point; the csum info will be written to the
863 * then-current fs_csaddr as part of the final flush. */
864 newloc = find_freespace(ntot);
865 if (newloc < 0) {
866 printf("Sorry, no space available for new csums\n");
867 exit(EXIT_FAILURE);
868 }
869 for (i = 0, f = newsb->fs_csaddr, t = newloc; i < ntot; i++, f++, t++) {
870 if (i < nold) {
871 free_frag(f);
872 }
873 alloc_frag(t);
874 }
875 newsb->fs_csaddr = newloc;
876 }
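/*
 * Worked example (illustrative, assuming the 16-byte UFS1 struct csum
 * and fs_fsize 2048): growing from 100 to 200 cgs takes the summary
 * data from 1600 to 3200 bytes, so fs_cssize goes from one frag to
 * two and nnew is 1.  If the frag just past the old csum area is
 * free it is simply allocated; otherwise the whole two-frag area is
 * relocated via find_freespace() and fs_csaddr is updated.
 */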
877 /*
878 * Recompute newsb->fs_dsize. Just scans all cgs, adding the number of
879 * data blocks in that cg to the total.
880 */
881 static void
882 recompute_fs_dsize(void)
883 {
884 int i;
885
886 newsb->fs_dsize = 0;
887 for (i = 0; i < newsb->fs_ncg; i++) {
888 int dlow; /* size of before-sb data area */
889 int dhigh; /* offset of post-inode data area */
890 int dmax; /* total size of cg */
891 int base; /* base of cg, since cgsblock() etc add it in */
892 base = cgbase(newsb, i);
893 dlow = cgsblock(newsb, i) - base;
894 dhigh = cgdmin(newsb, i) - base;
895 dmax = newsb->fs_size - base;
896 if (dmax > newsb->fs_fpg)
897 dmax = newsb->fs_fpg;
898 newsb->fs_dsize += dlow + dmax - dhigh;
899 }
900 /* Space in cg 0 before cgsblock is boot area, not free space! */
901 newsb->fs_dsize -= cgsblock(newsb, 0) - cgbase(newsb, 0);
902 /* And of course the csum info takes up space. */
903 newsb->fs_dsize -= howmany(newsb->fs_cssize, newsb->fs_fsize);
904 }
905 /*
906 * Return the current time. We call this and assign, rather than
907 * calling time() directly, as insulation against OSes where fs_time
908 * is not a time_t.
909 */
910 static time_t
911 timestamp(void)
912 {
913 time_t t;
914
915 time(&t);
916 return (t);
917 }
918 /*
919 * Grow the file system.
920 */
921 static void
922 grow(void)
923 {
924 int i;
925
926 /* Update the timestamp. */
927 newsb->fs_time = timestamp();
928 /* Allocate and clear the new-inode area, in case we add any cgs. */
929 zinodes = alloconce(newsb->fs_ipg * sizeof(struct ufs1_dinode),
930 "zeroed inodes");
931 memset(zinodes, 0, newsb->fs_ipg * sizeof(struct ufs1_dinode));
932 /* Update the size. */
933 newsb->fs_size = dbtofsb(newsb, newsize);
934 /* Did we actually not grow? (This can happen if newsize is less than
935 * a frag larger than the old size - unlikely, but no excuse to
936 * misbehave if it happens.) */
937 if (newsb->fs_size == oldsb->fs_size) {
 938 		printf("New fs size %"PRIu64" = old fs size %"PRIu64
939 ", not growing.\n", newsb->fs_size, oldsb->fs_size);
940 return;
941 }
942 /* Check that the new last sector (frag, actually) is writable. Since
943 * it's at least one frag larger than it used to be, we know we aren't
944 * overwriting anything important by this. (The choice of sbbuf as
945 * what to write is irrelevant; it's just something handy that's known
946 * to be at least one frag in size.) */
947 writeat(fsbtodb(newsb,newsb->fs_size - 1), &sbbuf, newsb->fs_fsize);
948 if (is_ufs2)
949 newsb->fs_ncg = howmany(newsb->fs_size, newsb->fs_fpg);
950 else {
951 /* Update fs_old_ncyl and fs_ncg. */
952 newsb->fs_old_ncyl = howmany(newsb->fs_size * NSPF(newsb),
953 newsb->fs_old_spc);
954 newsb->fs_ncg = howmany(newsb->fs_old_ncyl, newsb->fs_old_cpg);
955 }
956
957 /* Does the last cg end before the end of its inode area? There is no
958 * reason why this couldn't be handled, but it would complicate a lot
959 * of code (in all file system code - fsck, kernel, etc) because of the
960 * potential partial inode area, and the gain in space would be
961 * minimal, at most the pre-sb data area. */
962 if (cgdmin(newsb, newsb->fs_ncg - 1) > newsb->fs_size) {
963 newsb->fs_ncg--;
964 newsb->fs_old_ncyl = newsb->fs_ncg * newsb->fs_old_cpg;
965 newsb->fs_size = (newsb->fs_old_ncyl * newsb->fs_old_spc)
966 / NSPF(newsb);
967 printf("Warning: last cylinder group is too small;\n");
968 printf(" dropping it. New size = %lu.\n",
969 (unsigned long int) fsbtodb(newsb, newsb->fs_size));
970 }
971 /* Find out how big the csum area is, and realloc csums if bigger. */
972 newsb->fs_cssize = fragroundup(newsb,
973 newsb->fs_ncg * sizeof(struct csum));
974 if (newsb->fs_cssize > oldsb->fs_cssize)
975 csums = nfrealloc(csums, newsb->fs_cssize, "new cg summary");
976 /* If we're adding any cgs, realloc structures and set up the new
977 cgs. */
978 if (newsb->fs_ncg > oldsb->fs_ncg) {
979 char *cgp;
980 cgs = nfrealloc(cgs, newsb->fs_ncg * sizeof(struct cg *),
981 "cg pointers");
982 cgflags = nfrealloc(cgflags, newsb->fs_ncg, "cg flags");
983 memset(cgflags + oldsb->fs_ncg, 0,
984 newsb->fs_ncg - oldsb->fs_ncg);
985 cgp = alloconce((newsb->fs_ncg - oldsb->fs_ncg) * cgblksz,
986 "cgs");
987 for (i = oldsb->fs_ncg; i < newsb->fs_ncg; i++) {
988 cgs[i] = (struct cg *) cgp;
989 initcg(i);
990 cgp += cgblksz;
991 }
992 cgs[oldsb->fs_ncg - 1]->cg_old_ncyl = oldsb->fs_old_cpg;
993 cgflags[oldsb->fs_ncg - 1] |= CGF_DIRTY;
994 }
995 /* If the old fs ended partway through a cg, we have to update the old
996 * last cg (though possibly not to a full cg!). */
997 if (oldsb->fs_size % oldsb->fs_fpg) {
998 struct cg *cg;
999 int newcgsize;
1000 int prevcgtop;
1001 int oldcgsize;
1002 cg = cgs[oldsb->fs_ncg - 1];
1003 cgflags[oldsb->fs_ncg - 1] |= CGF_DIRTY | CGF_BLKMAPS;
1004 prevcgtop = oldsb->fs_fpg * (oldsb->fs_ncg - 1);
1005 newcgsize = newsb->fs_size - prevcgtop;
1006 if (newcgsize > newsb->fs_fpg)
1007 newcgsize = newsb->fs_fpg;
1008 oldcgsize = oldsb->fs_size % oldsb->fs_fpg;
1009 set_bits(cg_blksfree(cg, 0), oldcgsize, newcgsize - oldcgsize);
1010 cg->cg_old_ncyl = oldsb->fs_old_cpg;
1011 cg->cg_ndblk = newcgsize;
1012 }
1013 /* Fix up the csum info, if necessary. */
1014 csum_fixup();
1015 /* Make fs_dsize match the new reality. */
1016 recompute_fs_dsize();
1017 }
1018 /*
1019 * Call (*fn)() for each inode, passing the inode and its inumber. The
1020 	 * number of cylinder groups is passed in, so this can be used to map
1021 * over either the old or the new file system's set of inodes.
1022 */
1023 static void
1024 map_inodes(void (*fn) (union dinode * di, unsigned int, void *arg),
1025 int ncg, void *cbarg) {
1026 int i;
1027 int ni;
1028
1029 ni = oldsb->fs_ipg * ncg;
1030 for (i = 0; i < ni; i++)
1031 (*fn) (inodes + i, i, cbarg);
1032 }
1033 /* Values for the opcode (final) argument to the map function for
1034  * map_inode_data_blocks. MDB_DATA indicates the block contains
1035 * file data; MDB_INDIR_PRE and MDB_INDIR_POST indicate that it's an
1036 * indirect block. The MDB_INDIR_PRE call is made before the indirect
1037 * block pointers are followed and the pointed-to blocks scanned,
1038 * MDB_INDIR_POST after.
1039 */
1040 #define MDB_DATA 1
1041 #define MDB_INDIR_PRE 2
1042 #define MDB_INDIR_POST 3
1043
1044 typedef void (*mark_callback_t) (unsigned int blocknum, unsigned int nfrags,
1045 unsigned int blksize, int opcode);
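/*
 * Illustrative call sequence: for a file with all NDADDR direct
 * blocks in use plus one single-indirect block, the callback sees
 * MDB_DATA once per direct block, then MDB_INDIR_PRE for the indirect
 * block, MDB_DATA for each block it points to, and finally
 * MDB_INDIR_POST for the indirect block again.
 */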
1046
1047 /* Helper function - handles a data block. Calls the callback
1048 * function and returns number of bytes occupied in file (actually,
1049 * rounded up to a frag boundary). The name is historical. */
1050 static int
1051 markblk(mark_callback_t fn, union dinode * di, int bn, off_t o)
1052 {
1053 int sz;
1054 int nb;
1055
1056 if (o >= DIP(di,di_size))
1057 return (0);
1058 sz = dblksize(newsb, di, lblkno(newsb, o));
1059 nb = (sz > DIP(di,di_size) - o) ? DIP(di,di_size) - o : sz;
1060 if (bn)
1061 (*fn) (bn, numfrags(newsb, sz), nb, MDB_DATA);
1062 return (sz);
1063 }
1064 /* Helper function - handles an indirect block. Makes the
1065 * MDB_INDIR_PRE callback for the indirect block, loops over the
1066 * pointers and recurses, and makes the MDB_INDIR_POST callback.
1067 * Returns the number of bytes occupied in file, as does markblk().
1068 * For the sake of update_for_data_move(), we read the indirect block
1069 * _after_ making the _PRE callback. The name is historical. */
1070 static int
1071 markiblk(mark_callback_t fn, union dinode * di, int bn, off_t o, int lev)
1072 {
1073 int i;
1074 int j;
1075 int tot;
1076 static int32_t indirblk1[howmany(MAXBSIZE, sizeof(int32_t))];
1077 static int32_t indirblk2[howmany(MAXBSIZE, sizeof(int32_t))];
1078 static int32_t indirblk3[howmany(MAXBSIZE, sizeof(int32_t))];
1079 static int32_t *indirblks[3] = {
1080 &indirblk1[0], &indirblk2[0], &indirblk3[0]
1081 };
1082
1083 if (lev < 0)
1084 return (markblk(fn, di, bn, o));
1085 if (bn == 0) {
1086 for (i = newsb->fs_bsize;
1087 lev >= 0;
1088 i *= NINDIR(newsb), lev--);
1089 return (i);
1090 }
1091 (*fn) (bn, newsb->fs_frag, newsb->fs_bsize, MDB_INDIR_PRE);
1092 readat(fsbtodb(newsb, bn), indirblks[lev], newsb->fs_bsize);
1093 if (needswap)
1094 for (i = 0; i < howmany(MAXBSIZE, sizeof(int32_t)); i++)
1095 indirblks[lev][i] = bswap32(indirblks[lev][i]);
1096 tot = 0;
1097 for (i = 0; i < NINDIR(newsb); i++) {
1098 j = markiblk(fn, di, indirblks[lev][i], o, lev - 1);
1099 if (j == 0)
1100 break;
1101 o += j;
1102 tot += j;
1103 }
1104 (*fn) (bn, newsb->fs_frag, newsb->fs_bsize, MDB_INDIR_POST);
1105 return (tot);
1106 }
1107
1108
1109 /*
1110 * Call (*fn)() for each data block for an inode. This routine assumes
1111 * the inode is known to be of a type that has data blocks (file,
1112 * directory, or non-fast symlink). The called function is:
1113 *
1114 * (*fn)(unsigned int blkno, unsigned int nf, unsigned int nb, int op)
1115 *
1116 * where blkno is the frag number, nf is the number of frags starting
1117 * at blkno (always <= fs_frag), nb is the number of bytes that belong
1118 	 * to the file (usually nf*fs_fsize, often less for the last block/frag
1119 	 * of a file), and op is one of the MDB_* values above.
1120 */
1121 static void
1122 map_inode_data_blocks(union dinode * di, mark_callback_t fn)
1123 {
1124 off_t o; /* offset within inode */
1125 int inc; /* increment for o - maybe should be off_t? */
1126 int b; /* index within di_db[] and di_ib[] arrays */
1127
1128 /* Scan the direct blocks... */
1129 o = 0;
1130 for (b = 0; b < NDADDR; b++) {
1131 inc = markblk(fn, di, DIP(di,di_db[b]), o);
1132 if (inc == 0)
1133 break;
1134 o += inc;
1135 }
1136 /* ...and the indirect blocks. */
1137 if (inc) {
1138 for (b = 0; b < NIADDR; b++) {
1139 inc = markiblk(fn, di, DIP(di,di_ib[b]), o, b);
1140 if (inc == 0)
1141 return;
1142 o += inc;
1143 }
1144 }
1145 }
1146
1147 static void
1148 dblk_callback(union dinode * di, unsigned int inum, void *arg)
1149 {
1150 mark_callback_t fn;
1151
1152 fn = (mark_callback_t) arg;
1153 switch (DIP(di,di_mode) & IFMT) {
1154 case IFLNK:
1155 if (DIP(di,di_size) > newsb->fs_maxsymlinklen) {
1156 case IFDIR:
1157 case IFREG:
1158 map_inode_data_blocks(di, fn);
1159 }
1160 break;
1161 }
1162 }
1163 /*
1164 * Make a callback call, a la map_inode_data_blocks, for all data
1165 * blocks in the entire fs. This is used only once, in
1166 * update_for_data_move, but it's out at top level because the complex
1167 * downward-funarg nesting that would otherwise result seems to give
1168 * gcc gastric distress.
1169 */
1170 static void
1171 map_data_blocks(mark_callback_t fn, int ncg)
1172 {
1173 map_inodes(&dblk_callback, ncg, (void *) fn);
1174 }
1175 /*
1176 * Initialize the blkmove array.
1177 */
1178 static void
1179 blkmove_init(void)
1180 {
1181 int i;
1182
1183 blkmove = alloconce(oldsb->fs_size * sizeof(*blkmove), "blkmove");
1184 for (i = 0; i < oldsb->fs_size; i++)
1185 blkmove[i] = i;
1186 }
1187 /*
1188 * Load the inodes off disk. Allocates the structures and initializes
1189 * them - the inodes from disk, the flags to zero.
1190 */
1191 static void
1192 loadinodes(void)
1193 {
1194 int imax, ino, i, j;
1195 struct ufs1_dinode *dp1 = NULL;
1196 struct ufs2_dinode *dp2 = NULL;
1197
1198 /* read inodes one fs block at a time and copy them */
1199
1200 inodes = alloconce(oldsb->fs_ncg * oldsb->fs_ipg *
1201 sizeof(union dinode), "inodes");
1202 iflags = alloconce(oldsb->fs_ncg * oldsb->fs_ipg, "inode flags");
1203 memset(iflags, 0, oldsb->fs_ncg * oldsb->fs_ipg);
1204
1205 ibuf = nfmalloc(oldsb->fs_bsize,"inode block buf");
1206 if (is_ufs2)
1207 dp2 = (struct ufs2_dinode *)ibuf;
1208 else
1209 dp1 = (struct ufs1_dinode *)ibuf;
1210
1211 for (ino = 0,imax = oldsb->fs_ipg * oldsb->fs_ncg; ino < imax; ) {
1212 readat(fsbtodb(oldsb, ino_to_fsba(oldsb, ino)), ibuf,
1213 oldsb->fs_bsize);
1214
1215 for (i = 0; i < oldsb->fs_inopb; i++) {
1216 if (is_ufs2) {
1217 if (needswap) {
1218 ffs_dinode2_swap(&(dp2[i]), &(dp2[i]));
1219 for (j = 0; j < NDADDR + NIADDR; j++)
1220 dp2[i].di_db[j] =
1221 bswap32(dp2[i].di_db[j]);
1222 }
1223 memcpy(&inodes[ino].dp2, &dp2[i],
1224 sizeof(struct ufs2_dinode));
1225 } else {
1226 if (needswap) {
1227 ffs_dinode1_swap(&(dp1[i]), &(dp1[i]));
1228 for (j = 0; j < NDADDR + NIADDR; j++)
1229 dp1[i].di_db[j] =
1230 bswap32(dp1[i].di_db[j]);
1231 }
1232 memcpy(&inodes[ino].dp1, &dp1[i],
1233 sizeof(struct ufs1_dinode));
1234 }
1235 if (++ino > imax)
1236 errx(EXIT_FAILURE,
1237 "Exceeded number of inodes");
1238 }
1239
1240 }
1241 }
1242 /*
1243 * Report a file-system-too-full problem.
1244 */
1245 static void
1246 toofull(void)
1247 {
1248 printf("Sorry, would run out of data blocks\n");
1249 exit(EXIT_FAILURE);
1250 }
1251 /*
1252 * Record a desire to move "n" frags from "from" to "to".
1253 */
1254 static void
1255 mark_move(unsigned int from, unsigned int to, unsigned int n)
1256 {
1257 for (; n > 0; n--)
1258 blkmove[from++] = to++;
1259 }
1260 /* Helper function - evict n frags, starting with start (cg-relative).
1261 * The free bitmap is scanned, unallocated frags are ignored, and
1262 * each block of consecutive allocated frags is moved as a unit.
1263 */
1264 static void
1265 fragmove(struct cg * cg, int base, unsigned int start, unsigned int n)
1266 {
1267 int i;
1268 int run;
1269
1270 run = 0;
1271 for (i = 0; i <= n; i++) {
1272 if ((i < n) && bit_is_clr(cg_blksfree(cg, 0), start + i)) {
1273 run++;
1274 } else {
1275 if (run > 0) {
1276 int off;
1277 off = find_freespace(run);
1278 if (off < 0)
1279 toofull();
1280 mark_move(base + start + i - run, off, run);
1281 set_bits(cg_blksfree(cg, 0), start + i - run,
1282 run);
1283 clr_bits(cg_blksfree(cgs[dtog(oldsb, off)], 0),
1284 dtogd(oldsb, off), run);
1285 }
1286 run = 0;
1287 }
1288 }
1289 }
1290 /*
1291 * Evict all data blocks from the given cg, starting at minfrag (based
1292 * at the beginning of the cg), for length nfrag. The eviction is
1293 * assumed to be entirely data-area; this should not be called with a
1294 * range overlapping the metadata structures in the cg. It also
1295 * assumes minfrag points into the given cg; it will misbehave if this
1296 * is not true.
1297 *
1298 * See the comment header on find_freespace() for one possible bug
1299 * lurking here.
1300 */
1301 static void
1302 evict_data(struct cg * cg, unsigned int minfrag, unsigned int nfrag)
1303 {
1304 int base; /* base of cg (in frags from beginning of fs) */
1305
1306 base = cgbase(oldsb, cg->cg_cgx);
1307 /* Does the boundary fall in the middle of a block? To avoid
1308 * breaking between frags allocated as consecutive, we always
1309 * evict the whole block in this case, though one could argue
1310 * we should check to see if the frag before or after the
1311 * break is unallocated. */
1312 if (minfrag % oldsb->fs_frag) {
1313 int n;
1314 n = minfrag % oldsb->fs_frag;
1315 minfrag -= n;
1316 nfrag += n;
1317 }
1318 /* Do whole blocks. If a block is wholly free, skip it; if
1319 * wholly allocated, move it in toto. If neither, call
1320 * fragmove() to move the frags to new locations. */
1321 while (nfrag >= oldsb->fs_frag) {
1322 if (!blk_is_set(cg_blksfree(cg, 0), minfrag, oldsb->fs_frag)) {
1323 if (blk_is_clr(cg_blksfree(cg, 0), minfrag,
1324 oldsb->fs_frag)) {
1325 int off;
1326 off = find_freeblock();
1327 if (off < 0)
1328 toofull();
1329 mark_move(base + minfrag, off, oldsb->fs_frag);
1330 set_bits(cg_blksfree(cg, 0), minfrag,
1331 oldsb->fs_frag);
1332 clr_bits(cg_blksfree(cgs[dtog(oldsb, off)], 0),
1333 dtogd(oldsb, off), oldsb->fs_frag);
1334 } else {
1335 fragmove(cg, base, minfrag, oldsb->fs_frag);
1336 }
1337 }
1338 minfrag += oldsb->fs_frag;
1339 nfrag -= oldsb->fs_frag;
1340 }
1341 /* Clean up any sub-block amount left over. */
1342 if (nfrag) {
1343 fragmove(cg, base, minfrag, nfrag);
1344 }
1345 }
1346 /*
1347 * Move all data blocks according to blkmove. We have to be careful,
1348 * because we may be updating indirect blocks that will themselves be
1349 * getting moved, or inode int32_t arrays that point to indirect
1350 * blocks that will be moved. We call this before
1351 * update_for_data_move, and update_for_data_move does inodes first,
1352 * then indirect blocks in preorder, so as to make sure that the
1353 * file system is self-consistent at all points, for better crash
1354 * tolerance. (We can get away with this only because all the writes
1355 * done by perform_data_move() are writing into space that's not used
1356 * by the old file system.) If we crash, some things may point to the
1357 * old data and some to the new, but both copies are the same. The
1358 * only wrong things should be csum info and free bitmaps, which fsck
1359 * is entirely capable of cleaning up.
1360 *
1361 * Since blkmove_init() initializes all blocks to move to their current
1362 * locations, we can have two blocks marked as wanting to move to the
1363 * same location, but only two and only when one of them is the one
1364 * that was already there. So if blkmove[i]==i, we ignore that entry
1365 * entirely - for unallocated blocks, we don't want it (and may be
1366 * putting something else there), and for allocated blocks, we don't
1367 * want to copy it anywhere.
1368 */
1369 static void
1370 perform_data_move(void)
1371 {
1372 int i;
1373 int run;
1374 int maxrun;
1375 char buf[65536];
1376
1377 maxrun = sizeof(buf) / newsb->fs_fsize;
1378 run = 0;
1379 for (i = 0; i < oldsb->fs_size; i++) {
1380 if ((blkmove[i] == i) ||
1381 (run >= maxrun) ||
1382 ((run > 0) &&
1383 (blkmove[i] != blkmove[i - 1] + 1))) {
1384 if (run > 0) {
1385 readat(fsbtodb(oldsb, i - run), &buf[0],
1386 run << oldsb->fs_fshift);
1387 writeat(fsbtodb(oldsb, blkmove[i - run]),
1388 &buf[0], run << oldsb->fs_fshift);
1389 }
1390 run = 0;
1391 }
1392 if (blkmove[i] != i)
1393 run++;
1394 }
1395 if (run > 0) {
1396 readat(fsbtodb(oldsb, i - run), &buf[0],
1397 run << oldsb->fs_fshift);
1398 writeat(fsbtodb(oldsb, blkmove[i - run]), &buf[0],
1399 run << oldsb->fs_fshift);
1400 }
1401 }
1402 /*
1403 * This modifies an array of int32_t, according to blkmove. This is
1404 * used to update inode block arrays and indirect blocks to point to
1405 * the new locations of data blocks.
1406 *
1407 * Return value is the number of int32_ts that needed updating; in
1408 * particular, the return value is zero iff nothing was modified.
1409 */
1410 static int
1411 movemap_blocks(int32_t * vec, int n)
1412 {
1413 int rv;
1414
1415 rv = 0;
1416 for (; n > 0; n--, vec++) {
1417 if (blkmove[*vec] != *vec) {
1418 *vec = blkmove[*vec];
1419 rv++;
1420 }
1421 }
1422 return (rv);
1423 }
1424 static void
1425 moveblocks_callback(union dinode * di, unsigned int inum, void *arg)
1426 {
1427 void *dblkptr, *iblkptr; /* XXX */
1428
1429 switch (DIP(di,di_mode) & IFMT) {
1430 case IFLNK:
1431 if (DIP(di,di_size) <= oldsb->fs_maxsymlinklen) {
1432 break;
1433 }
1434 /* FALLTHROUGH */
1435 case IFDIR:
1436 case IFREG:
1437 if (is_ufs2) {
1438 dblkptr = &(di->dp2.di_db[0]);
1439 iblkptr = &(di->dp2.di_ib[0]);
1440 } else {
1441 dblkptr = &(di->dp1.di_db[0]);
1442 iblkptr = &(di->dp1.di_ib[0]);
1443 }
1444 /*
1445 * Don't || these two calls; we need their
1446 * side-effects.
1447 */
1448 if (movemap_blocks(dblkptr, NDADDR)) {
1449 iflags[inum] |= IF_DIRTY;
1450 }
1451 if (movemap_blocks(iblkptr, NIADDR)) {
1452 iflags[inum] |= IF_DIRTY;
1453 }
1454 break;
1455 }
1456 }
1457
1458 static void
1459 moveindir_callback(unsigned int off, unsigned int nfrag, unsigned int nbytes,
1460 int kind)
1461 {
1462 int i;
1463
1464 if (kind == MDB_INDIR_PRE) {
1465 int32_t blk[howmany(MAXBSIZE, sizeof(int32_t))];
1466 readat(fsbtodb(oldsb, off), &blk[0], oldsb->fs_bsize);
1467 if (needswap)
1468 for (i = 0; i < howmany(MAXBSIZE, sizeof(int32_t)); i++)
1469 blk[i] = bswap32(blk[i]);
1470 if (movemap_blocks(&blk[0], NINDIR(oldsb))) {
1471 if (needswap)
1472 for (i = 0; i < howmany(MAXBSIZE,
1473 sizeof(int32_t)); i++)
1474 blk[i] = bswap32(blk[i]);
1475 writeat(fsbtodb(oldsb, off), &blk[0], oldsb->fs_bsize);
1476 }
1477 }
1478 }
1479 /*
1480 * Update all inode data arrays and indirect blocks to point to the new
1481 * locations of data blocks. See the comment header on
1482 * perform_data_move for some ordering considerations.
1483 */
1484 static void
1485 update_for_data_move(void)
1486 {
1487 map_inodes(&moveblocks_callback, oldsb->fs_ncg, NULL);
1488 map_data_blocks(&moveindir_callback, oldsb->fs_ncg);
1489 }
1490 /*
1491 * Initialize the inomove array.
1492 */
1493 static void
1494 inomove_init(void)
1495 {
1496 int i;
1497
1498 inomove = alloconce(oldsb->fs_ipg * oldsb->fs_ncg * sizeof(*inomove),
1499 "inomove");
1500 for (i = (oldsb->fs_ipg * oldsb->fs_ncg) - 1; i >= 0; i--)
1501 inomove[i] = i;
1502 }
1503 /*
1504 * Flush all dirtied inodes to disk. Scans the inode flags array; for
1505 * each dirty inode, it sets the BDIRTY bit on the first inode in the
1506 * block containing the dirty inode. Then it scans by blocks, and for
1507 * each marked block, writes it.
1508 */
1509 static void
1510 flush_inodes(void)
1511 {
1512 int i, j, k, na, ni, m;
1513 struct ufs1_dinode *dp1 = NULL;
1514 struct ufs2_dinode *dp2 = NULL;
1515
1516 na = NDADDR + NIADDR;
1517 ni = newsb->fs_ipg * newsb->fs_ncg;
1518 m = INOPB(newsb) - 1;
1519 for (i = 0; i < ni; i++) {
1520 if (iflags[i] & IF_DIRTY) {
1521 iflags[i & ~m] |= IF_BDIRTY;
1522 }
1523 }
1524 m++;
1525
1526 if (is_ufs2)
1527 dp2 = (struct ufs2_dinode *)ibuf;
1528 else
1529 dp1 = (struct ufs1_dinode *)ibuf;
1530
1531 for (i = 0; i < ni; i += m) {
1532 if (iflags[i] & IF_BDIRTY) {
1533 if (is_ufs2)
1534 for (j = 0; j < m; j++) {
1535 dp2[j] = inodes[i + j].dp2;
1536 if (needswap) {
1537 for (k = 0; k < na; k++)
1538 dp2[j].di_db[k]=
1539 bswap32(dp2[j].di_db[k]);
1540 ffs_dinode2_swap(&dp2[j],
1541 &dp2[j]);
1542 }
1543 }
1544 else
1545 for (j = 0; j < m; j++) {
1546 dp1[j] = inodes[i + j].dp1;
1547 if (needswap) {
1548 for (k = 0; k < na; k++)
1549 dp1[j].di_db[k]=
1550 bswap32(dp1[j].di_db[k]);
1551 ffs_dinode1_swap(&dp1[j],
1552 &dp1[j]);
1553 }
1554 }
1555
1556 writeat(fsbtodb(newsb, ino_to_fsba(newsb, i)),
1557 ibuf, newsb->fs_bsize);
1558 }
1559 }
1560 }
1561 /*
1562 * Evict all inodes from the specified cg. shrink() already checked
1563 * that there were enough free inodes, so the no-free-inodes check is
1564 * a can't-happen. If it does trip, the file system should be in good
1565 * enough shape for fsck to fix; see the comment on perform_data_move
1566 * for the considerations in question.
1567 */
1568 static void
1569 evict_inodes(struct cg * cg)
1570 {
1571 int inum;
1572 int i;
1573 int fi;
1574
1575 inum = newsb->fs_ipg * cg->cg_cgx;
1576 for (i = 0; i < newsb->fs_ipg; i++, inum++) {
1577 if (DIP(inodes + inum,di_mode) != 0) {
1578 fi = find_freeinode();
1579 if (fi < 0) {
1580 printf("Sorry, inodes evaporated - "
1581 "file system probably needs fsck\n");
1582 exit(EXIT_FAILURE);
1583 }
1584 inomove[inum] = fi;
1585 clr_bits(cg_inosused(cg, 0), i, 1);
1586 set_bits(cg_inosused(cgs[ino_to_cg(newsb, fi)], 0),
1587 fi % newsb->fs_ipg, 1);
1588 }
1589 }
1590 }
1591 /*
1592 * Move inodes from old locations to new. Does not actually write
1593 * anything to disk; just copies in-core and sets dirty bits.
1594 *
1595 * We have to be careful here for reasons similar to those mentioned in
1596 * the comment header on perform_data_move, above: for the sake of
1597 * crash tolerance, we want to make sure everything is present at both
1598 * old and new locations before we update pointers. So we call this
1599 * first, then flush_inodes() to get them out on disk, then update
1600 * directories to match.
1601 */
1602 static void
1603 perform_inode_move(void)
1604 {
1605 int i;
1606 int ni;
1607
1608 ni = oldsb->fs_ipg * oldsb->fs_ncg;
1609 for (i = 0; i < ni; i++) {
1610 if (inomove[i] != i) {
1611 inodes[inomove[i]] = inodes[i];
1612 iflags[inomove[i]] = iflags[i] | IF_DIRTY;
1613 }
1614 }
1615 }
1616 /*
1617 * Update the directory contained in the nb bytes at buf, to point to
1618 * inodes' new locations.
1619 */
1620 static int
1621 update_dirents(char *buf, int nb)
1622 {
1623 int rv;
1624 #define d ((struct direct *)buf)
1625 #define s32(x) (needswap?bswap32((x)):(x))
1626 #define s16(x) (needswap?bswap16((x)):(x))
1627
1628 rv = 0;
1629 while (nb > 0) {
1630 if (inomove[s32(d->d_ino)] != s32(d->d_ino)) {
1631 rv++;
1632 d->d_ino = s32(inomove[s32(d->d_ino)]);
1633 }
1634 nb -= s16(d->d_reclen);
1635 buf += s16(d->d_reclen);
1636 }
1637 return (rv);
1638 #undef d
1639 #undef s32
1640 #undef s16
1641 }
1642 /*
1643 * Callback function for map_inode_data_blocks, for updating a
1644 * directory to point to new inode locations.
1645 */
1646 static void
1647 update_dir_data(unsigned int bn, unsigned int size, unsigned int nb, int kind)
1648 {
1649 if (kind == MDB_DATA) {
1650 union {
1651 struct direct d;
1652 char ch[MAXBSIZE];
1653 } buf;
1654 readat(fsbtodb(oldsb, bn), &buf, size << oldsb->fs_fshift);
1655 if (update_dirents((char *) &buf, nb)) {
1656 writeat(fsbtodb(oldsb, bn), &buf,
1657 size << oldsb->fs_fshift);
1658 }
1659 }
1660 }
1661 static void
1662 dirmove_callback(union dinode * di, unsigned int inum, void *arg)
1663 {
1664 switch (DIP(di,di_mode) & IFMT) {
1665 case IFDIR:
1666 map_inode_data_blocks(di, &update_dir_data);
1667 break;
1668 }
1669 }
1670 /*
1671 * Update directory entries to point to new inode locations.
1672 */
1673 static void
1674 update_for_inode_move(void)
1675 {
1676 map_inodes(&dirmove_callback, newsb->fs_ncg, NULL);
1677 }
1678 /*
1679 * Shrink the file system.
1680 */
1681 static void
1682 shrink(void)
1683 {
1684 int i;
1685
1686 /* Load the inodes off disk - we'll need 'em. */
1687 loadinodes();
1688 /* Update the timestamp. */
1689 newsb->fs_time = timestamp();
1690 /* Update the size figures. */
1691 newsb->fs_size = dbtofsb(newsb, newsize);
1692 if (is_ufs2)
1693 newsb->fs_ncg = howmany(newsb->fs_size, newsb->fs_fpg);
1694 else {
1695 newsb->fs_old_ncyl = howmany(newsb->fs_size * NSPF(newsb),
1696 newsb->fs_old_spc);
1697 newsb->fs_ncg = howmany(newsb->fs_old_ncyl, newsb->fs_old_cpg);
1698 }
1699 /* Does the (new) last cg end before the end of its inode area? See
1700 * the similar code in grow() for more on this. */
1701 if (cgdmin(newsb, newsb->fs_ncg - 1) > newsb->fs_size) {
1702 newsb->fs_ncg--;
1703 if (is_ufs2 == 0) {
1704 newsb->fs_old_ncyl = newsb->fs_ncg * newsb->fs_old_cpg;
1705 newsb->fs_size = (newsb->fs_old_ncyl *
1706 newsb->fs_old_spc) / NSPF(newsb);
1707 } else
1708 newsb->fs_size = newsb->fs_ncg * newsb->fs_fpg;
1709
1710 printf("Warning: last cylinder group is too small;\n");
1711 printf(" dropping it. New size = %lu.\n",
1712 (unsigned long int) fsbtodb(newsb, newsb->fs_size));
1713 }
1714 /* Let's make sure we're not being shrunk into oblivion. */
1715 if (newsb->fs_ncg < 1) {
1716 printf("Size too small - file system would "
1717 "have no cylinders\n");
1718 exit(EXIT_FAILURE);
1719 }
1720 /* Initialize for block motion. */
1721 blkmove_init();
1722 /* Update csum size, then fix up for the new size */
1723 newsb->fs_cssize = fragroundup(newsb,
1724 newsb->fs_ncg * sizeof(struct csum));
1725 csum_fixup();
1726 /* Evict data from any cgs being wholly eliminated */
1727 for (i = newsb->fs_ncg; i < oldsb->fs_ncg; i++) {
1728 int base;
1729 int dlow;
1730 int dhigh;
1731 int dmax;
1732 base = cgbase(oldsb, i);
1733 dlow = cgsblock(oldsb, i) - base;
1734 dhigh = cgdmin(oldsb, i) - base;
1735 dmax = oldsb->fs_size - base;
1736 if (dmax > cgs[i]->cg_ndblk)
1737 dmax = cgs[i]->cg_ndblk;
1738 evict_data(cgs[i], 0, dlow);
1739 evict_data(cgs[i], dhigh, dmax - dhigh);
1740 newsb->fs_cstotal.cs_ndir -= cgs[i]->cg_cs.cs_ndir;
1741 newsb->fs_cstotal.cs_nifree -= cgs[i]->cg_cs.cs_nifree;
1742 newsb->fs_cstotal.cs_nffree -= cgs[i]->cg_cs.cs_nffree;
1743 newsb->fs_cstotal.cs_nbfree -= cgs[i]->cg_cs.cs_nbfree;
1744 }
1745 /* Update the new last cg. */
1746 cgs[newsb->fs_ncg - 1]->cg_ndblk = newsb->fs_size -
1747 ((newsb->fs_ncg - 1) * newsb->fs_fpg);
1748 /* Is the new last cg partial? If so, evict any data from the part
1749 * being shrunken away. */
1750 if (newsb->fs_size % newsb->fs_fpg) {
1751 struct cg *cg;
1752 int oldcgsize;
1753 int newcgsize;
1754 cg = cgs[newsb->fs_ncg - 1];
1755 newcgsize = newsb->fs_size % newsb->fs_fpg;
1756 		oldcgsize = oldsb->fs_size - ((newsb->fs_ncg - 1) *
1757 oldsb->fs_fpg);
1758 if (oldcgsize > oldsb->fs_fpg)
1759 oldcgsize = oldsb->fs_fpg;
1760 evict_data(cg, newcgsize, oldcgsize - newcgsize);
1761 clr_bits(cg_blksfree(cg, 0), newcgsize, oldcgsize - newcgsize);
1762 }
1763 /* Find out whether we would run out of inodes. (Note we
1764 * haven't actually done anything to the file system yet; all
1765 * those evict_data calls just update blkmove.) */
1766 {
1767 int slop;
1768 slop = 0;
1769 for (i = 0; i < newsb->fs_ncg; i++)
1770 slop += cgs[i]->cg_cs.cs_nifree;
1771 for (; i < oldsb->fs_ncg; i++)
1772 slop -= oldsb->fs_ipg - cgs[i]->cg_cs.cs_nifree;
1773 if (slop < 0) {
1774 printf("Sorry, would run out of inodes\n");
1775 exit(EXIT_FAILURE);
1776 }
1777 }
1778 /* Copy data, then update pointers to data. See the comment
1779 * header on perform_data_move for ordering considerations. */
1780 perform_data_move();
1781 update_for_data_move();
1782 /* Now do inodes. Initialize, evict, move, update - see the
1783 * comment header on perform_inode_move. */
1784 inomove_init();
1785 for (i = newsb->fs_ncg; i < oldsb->fs_ncg; i++)
1786 evict_inodes(cgs[i]);
1787 perform_inode_move();
1788 flush_inodes();
1789 update_for_inode_move();
1790 /* Recompute all the bitmaps; most of them probably need it anyway,
1791 * and redoing the rest is just paranoia - cheaper than keeping
1792 * track of exactly which ones require it. */
1793 for (i = 0; i < newsb->fs_ncg; i++)
1794 cgflags[i] |= CGF_DIRTY | CGF_BLKMAPS | CGF_INOMAPS;
1795 /* Update the cg_old_ncyl value for the last cylinder. */
1796 if ((newsb->fs_old_flags & FS_FLAGS_UPDATED) == 0)
1797 cgs[newsb->fs_ncg - 1]->cg_old_ncyl =
1798 newsb->fs_old_ncyl % newsb->fs_old_cpg;
1799 /* Make fs_dsize match the new reality. */
1800 recompute_fs_dsize();
1801 }
1802 /*
1803 * Recompute the block totals, block cluster summaries, and rotational
1804 * position summaries, for a given cg (specified by number), based on
1805 * its free-frag bitmap (cg_blksfree()[]).
1806 */
1807 static void
1808 rescan_blkmaps(int cgn)
1809 {
1810 struct cg *cg;
1811 int f;
1812 int b;
1813 int blkfree;
1814 int blkrun;
1815 int fragrun;
1816 int fwb;
1817
1818 cg = cgs[cgn];
1819 /* Subtract off the current totals from the sb's summary info */
1820 newsb->fs_cstotal.cs_nffree -= cg->cg_cs.cs_nffree;
1821 newsb->fs_cstotal.cs_nbfree -= cg->cg_cs.cs_nbfree;
1822 /* Clear counters and bitmaps. */
1823 cg->cg_cs.cs_nffree = 0;
1824 cg->cg_cs.cs_nbfree = 0;
1825 memset(&cg->cg_frsum[0], 0, MAXFRAG * sizeof(cg->cg_frsum[0]));
1826 memset(&old_cg_blktot(cg, 0)[0], 0,
1827 newsb->fs_old_cpg * sizeof(old_cg_blktot(cg, 0)[0]));
1828 memset(&old_cg_blks(newsb, cg, 0, 0)[0], 0,
1829 newsb->fs_old_cpg * newsb->fs_old_nrpos *
1830 sizeof(old_cg_blks(newsb, cg, 0, 0)[0]));
1831 if (newsb->fs_contigsumsize > 0) {
1832 cg->cg_nclusterblks = cg->cg_ndblk / newsb->fs_frag;
1833 memset(&cg_clustersum(cg, 0)[1], 0,
1834 newsb->fs_contigsumsize *
1835 sizeof(cg_clustersum(cg, 0)[1]));
1836 if (is_ufs2)
1837 memset(&cg_clustersfree(cg, 0)[0], 0,
1838 howmany(newsb->fs_fpg / NSPB(newsb), NBBY));
1839 else
1840 memset(&cg_clustersfree(cg, 0)[0], 0,
1841 howmany((newsb->fs_old_cpg * newsb->fs_old_spc) /
1842 NSPB(newsb), NBBY));
1843 }
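/*
 * cg_clustersum()[n] counts runs of n contiguous free blocks
 * (runs longer than fs_contigsumsize are lumped into the last
 * bucket), and cg_clustersfree() has one bit per block, set when
 * the whole block is free.  Both are rebuilt from scratch by the
 * scan below, as are the ufs1 rotational-position tables.
 */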
1844 /* Scan the free-frag bitmap. Runs of free frags are tracked
1845 * in fragrun and recorded into cg_frsum[] and cg_cs.cs_nffree;
1846 * at each block boundary, entirely free blocks are counted in
1847 * cg_cs.cs_nbfree as well. */
1848 blkfree = 1;
1849 blkrun = 0;
1850 fragrun = 0;
1851 f = 0;
1852 b = 0;
1853 fwb = 0;
1854 while (f < cg->cg_ndblk) {
1855 if (bit_is_set(cg_blksfree(cg, 0), f)) {
1856 fragrun++;
1857 } else {
1858 blkfree = 0;
1859 if (fragrun > 0) {
1860 cg->cg_frsum[fragrun]++;
1861 cg->cg_cs.cs_nffree += fragrun;
1862 }
1863 fragrun = 0;
1864 }
1865 f++;
1866 fwb++;
1867 if (fwb >= newsb->fs_frag) {
1868 if (blkfree) {
1869 cg->cg_cs.cs_nbfree++;
1870 if (newsb->fs_contigsumsize > 0)
1871 set_bits(cg_clustersfree(cg, 0), b, 1);
1872 if (is_ufs2 == 0) {
1873 old_cg_blktot(cg, 0)[
1874 old_cbtocylno(newsb,
1875 f - newsb->fs_frag)]++;
1876 old_cg_blks(newsb, cg,
1877 old_cbtocylno(newsb,
1878 f - newsb->fs_frag),
1879 0)[old_cbtorpos(newsb,
1880 f - newsb->fs_frag)]++;
1881 }
1882 blkrun++;
1883 } else {
1884 if (fragrun > 0) {
1885 cg->cg_frsum[fragrun]++;
1886 cg->cg_cs.cs_nffree += fragrun;
1887 }
1888 if (newsb->fs_contigsumsize > 0) {
1889 if (blkrun > 0) {
1890 cg_clustersum(cg, 0)[(blkrun
1891 > newsb->fs_contigsumsize)
1892 ? newsb->fs_contigsumsize
1893 : blkrun]++;
1894 }
1895 }
1896 blkrun = 0;
1897 }
1898 fwb = 0;
1899 b++;
1900 blkfree = 1;
1901 fragrun = 0;
1902 }
1903 }
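/*
 * Flush a run of free frags that reaches the end of the cg
 * without hitting another block boundary, and likewise a
 * trailing run of free blocks for the cluster summary.
 */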
1904 if (fragrun > 0) {
1905 cg->cg_frsum[fragrun]++;
1906 cg->cg_cs.cs_nffree += fragrun;
1907 }
1908 if ((blkrun > 0) && (newsb->fs_contigsumsize > 0)) {
1909 cg_clustersum(cg, 0)[(blkrun > newsb->fs_contigsumsize) ?
1910 newsb->fs_contigsumsize : blkrun]++;
1911 }
1912 /*
1913 * Put the updated summary info back into csums, and add it
1914 * back into the sb's summary info. Then mark the cg dirty.
1915 */
1916 csums[cgn] = cg->cg_cs;
1917 newsb->fs_cstotal.cs_nffree += cg->cg_cs.cs_nffree;
1918 newsb->fs_cstotal.cs_nbfree += cg->cg_cs.cs_nbfree;
1919 cgflags[cgn] |= CGF_DIRTY;
1920 }
1921 /*
1922 * Recompute the cg_inosused()[] bitmap, and the cs_nifree and cs_ndir
1923 * values, for a cg, based on the in-core inodes for that cg.
1924 */
1925 static void
1926 rescan_inomaps(int cgn)
1927 {
1928 struct cg *cg;
1929 int inum;
1930 int iwc;
1931
1932 cg = cgs[cgn];
1933 newsb->fs_cstotal.cs_ndir -= cg->cg_cs.cs_ndir;
1934 newsb->fs_cstotal.cs_nifree -= cg->cg_cs.cs_nifree;
1935 cg->cg_cs.cs_ndir = 0;
1936 cg->cg_cs.cs_nifree = 0;
1937 memset(&cg_inosused(cg, 0)[0], 0, howmany(newsb->fs_ipg, NBBY));
1938 inum = cgn * newsb->fs_ipg;
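/*
 * Inodes 0 and 1 are never allocated (inode 1 was historically
 * the bad-block inode; the root directory is inode 2), so in
 * cg 0 they are simply marked in use rather than scanned.
 */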
1939 if (cgn == 0) {
1940 set_bits(cg_inosused(cg, 0), 0, 2);
1941 iwc = 2;
1942 inum += 2;
1943 } else {
1944 iwc = 0;
1945 }
1946 for (; iwc < newsb->fs_ipg; iwc++, inum++) {
1947 switch (DIP(inodes + inum, di_mode) & IFMT) {
1948 case 0:
1949 cg->cg_cs.cs_nifree++;
1950 break;
1951 case IFDIR:
1952 cg->cg_cs.cs_ndir++;
1953 /* fall through */
1954 default:
1955 set_bits(cg_inosused(cg, 0), iwc, 1);
1956 break;
1957 }
1958 }
1959 csums[cgn] = cg->cg_cs;
1960 newsb->fs_cstotal.cs_ndir += cg->cg_cs.cs_ndir;
1961 newsb->fs_cstotal.cs_nifree += cg->cg_cs.cs_nifree;
1962 cgflags[cgn] |= CGF_DIRTY;
1963 }
1964 /*
1965 * Flush cgs to disk, recomputing anything they're marked as needing.
1966 */
1967 static void
1968 flush_cgs(void)
1969 {
1970 int i;
1971
1972 for (i = 0; i < newsb->fs_ncg; i++) {
1973 if (cgflags[i] & CGF_BLKMAPS) {
1974 rescan_blkmaps(i);
1975 }
1976 if (cgflags[i] & CGF_INOMAPS) {
1977 rescan_inomaps(i);
1978 }
1979 if (cgflags[i] & CGF_DIRTY) {
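/*
 * The rotors are only allocation hints (where the kernel last
 * allocated a block, frag, or inode in this cg), so resetting
 * them to 0 is always safe.
 */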
1980 cgs[i]->cg_rotor = 0;
1981 cgs[i]->cg_frotor = 0;
1982 cgs[i]->cg_irotor = 0;
1983 if (needswap)
1984 ffs_cg_swap(cgs[i],cgs[i],newsb);
1985 writeat(fsbtodb(newsb, cgtod(newsb, i)), cgs[i],
1986 cgblksz);
1987 }
1988 }
1989 if (needswap)
1990 ffs_csum_swap(csums,csums,newsb->fs_cssize);
1991 writeat(fsbtodb(newsb, newsb->fs_csaddr), csums, newsb->fs_cssize);
1992 }
1993 /*
1994 * Write the superblock, both to the main superblock and to each cg's
1995 * alternative superblock.
1996 */
1997 static void
1998 write_sbs(void)
1999 {
2000 int i;
2001
2002 if (newsb->fs_magic == FS_UFS1_MAGIC &&
2003 (newsb->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
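/*
 * A pre-FS_FLAGS_UPDATED ufs1 superblock keeps its summary
 * figures in the fs_old_* fields as well, presumably so that
 * older tools which read those fields see consistent values;
 * mirror them before writing.
 */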
2004 newsb->fs_old_time = newsb->fs_time;
2005 newsb->fs_old_size = newsb->fs_size;
2006 /* we don't update fs_csaddr */
2007 newsb->fs_old_dsize = newsb->fs_dsize;
2008 newsb->fs_old_cstotal.cs_ndir = newsb->fs_cstotal.cs_ndir;
2009 newsb->fs_old_cstotal.cs_nbfree = newsb->fs_cstotal.cs_nbfree;
2010 newsb->fs_old_cstotal.cs_nifree = newsb->fs_cstotal.cs_nifree;
2011 newsb->fs_old_cstotal.cs_nffree = newsb->fs_cstotal.cs_nffree;
2012 /* fill fs_old_postbl_start with 256 bytes of 0xff? */
2013 }
2014 /* copy newsb back to oldsb, so we can use it for offsets if
2015 newsb has been swapped for writing to disk */
2016 memcpy(oldsb, newsb, SBLOCKSIZE);
2017 if (needswap)
2018 ffs_sb_swap(newsb,newsb);
2019 writeat(where / DEV_BSIZE, newsb, SBLOCKSIZE);
2020 for (i = 0; i < oldsb->fs_ncg; i++) {
2021 writeat(fsbtodb(oldsb, cgsblock(oldsb, i)), newsb, SBLOCKSIZE);
2022 }
2023 }
2024
2025 static uint32_t
2026 get_dev_size(char *dev_name)
2027 {
2028 struct dkwedge_info dkw;
2029 struct partition *pp;
2030 struct disklabel lp;
2031 size_t ptn;
2032
2033 /* Get info about partition/wedge */
2034 if (ioctl(fd, DIOCGWEDGEINFO, &dkw) == -1) {
2035 if (ioctl(fd, DIOCGDINFO, &lp) == -1)
2036 return 0;
2037
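/*
 * No wedge: fall back to the disklabel and derive the partition
 * index from the trailing letter of the device name (a name
 * ending in 'e', for instance, maps to partition 4).
 */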
2038 ptn = strchr(dev_name, '\0')[-1] - 'a';
2039 if (ptn >= lp.d_npartitions)
2040 return 0;
2041
2042 pp = &lp.d_partitions[ptn];
2043 return pp->p_size;
2044 }
2045
2046 return dkw.dkw_size;
2047 }
2048
2049 /*
2050 * main().
2051 */
2052 int
2053 main(int argc, char **argv)
2054 {
2055 int ch;
2056 int ExpertFlag;
2057 int SFlag;
2058 size_t i;
2059
2060 char *special;
2061 char reply[5];
2062
2063 newsize = 0;
2064 ExpertFlag = 0;
2065 SFlag = 0;
2066
2067 while ((ch = getopt(argc, argv, "s:y")) != -1) {
2068 switch (ch) {
2069 case 's':
2070 SFlag = 1;
2071 newsize = strtoull(optarg, NULL, 10);
2072 if (newsize < 1) {
2073 usage();
2074 }
2075 break;
2076 case 'y':
2077 ExpertFlag = 1;
2078 break;
2079 case '?':
2080 /* FALLTHROUGH */
2081 default:
2082 usage();
2083 }
2084 }
2085 argc -= optind;
2086 argv += optind;
2087
2088 if (argc != 1) {
2089 usage();
2090 }
2091
2092 special = *argv;
2093
2094 if (ExpertFlag == 0) {
2095 printf("You must run fsck on the file system "
2096 "before resizing it\n\n"
2097 " Did you run fsck on your disk (Yes/No) ? ");
2098 if (fgets(reply, (int)sizeof(reply), stdin) == NULL ||
2099 strcasecmp(reply, "Yes\n")) {
2100 printf("\n Nothing done \n");
2101 exit(EXIT_SUCCESS);
2102 }
2103 }
2104
2105 fd = open(special, O_RDWR, 0);
2106 if (fd < 0)
2107 err(EXIT_FAILURE, "Can't open `%s'", special);
2108 checksmallio();
2109
2110 if (SFlag == 0) {
2111 newsize = get_dev_size(special);
2112 if (newsize == 0)
2113 err(EXIT_FAILURE,
2114 "Can't resize file system, newsize not known.");
2115 }
2116
2117 oldsb = (struct fs *) & sbbuf;
2118 newsb = (struct fs *) (SBLOCKSIZE + (char *) &sbbuf);
2119 for (where = search[i = 0]; search[i] != -1; where = search[++i]) {
2120 readat(where / DEV_BSIZE, oldsb, SBLOCKSIZE);
2121 switch (oldsb->fs_magic) {
2122 case FS_UFS2_MAGIC:
2123 is_ufs2 = 1;
2124 /* FALLTHROUGH */
2125 case FS_UFS1_MAGIC:
2126 needswap = 0;
2127 break;
2128 case FS_UFS2_MAGIC_SWAPPED:
2129 is_ufs2 = 1;
2130 /* FALLTHROUGH */
2131 case FS_UFS1_MAGIC_SWAPPED:
2132 needswap = 1;
2133 break;
2134 default:
2135 continue;
2136 }
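/*
 * A ufs1 magic number found at SBLOCK_UFS2 (64k) is likely an
 * alternate superblock (e.g. of a file system with 64k blocks)
 * rather than the real one, so keep searching; only a ufs2
 * superblock is accepted at that offset.
 */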
2137 if (!is_ufs2 && where == SBLOCK_UFS2)
2138 continue;
2139 break;
2140 }
2141 if (where == (off_t)-1)
2142 errx(EXIT_FAILURE, "Bad magic number");
2143 if (needswap)
2144 ffs_sb_swap(oldsb,oldsb);
2145 if (oldsb->fs_magic == FS_UFS1_MAGIC &&
2146 (oldsb->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2147 oldsb->fs_csaddr = oldsb->fs_old_csaddr;
2148 oldsb->fs_size = oldsb->fs_old_size;
2149 oldsb->fs_dsize = oldsb->fs_old_dsize;
2150 oldsb->fs_cstotal.cs_ndir = oldsb->fs_old_cstotal.cs_ndir;
2151 oldsb->fs_cstotal.cs_nbfree = oldsb->fs_old_cstotal.cs_nbfree;
2152 oldsb->fs_cstotal.cs_nifree = oldsb->fs_old_cstotal.cs_nifree;
2153 oldsb->fs_cstotal.cs_nffree = oldsb->fs_old_cstotal.cs_nffree;
2154 /* any others? */
2155 printf("Resizing with ffsv1 superblock\n");
2156 }
2157
2158 oldsb->fs_qbmask = ~(int64_t) oldsb->fs_bmask;
2159 oldsb->fs_qfmask = ~(int64_t) oldsb->fs_fmask;
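/*
 * INOPB() is the number of inodes per file system block (for a
 * ufs1 file system with 8k blocks and 128-byte inodes that is
 * 64), so fs_ipg must describe a whole number of inode blocks
 * per cg; anything else indicates a corrupt superblock.
 */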
2160 if (oldsb->fs_ipg % INOPB(oldsb)) {
2161 (void)fprintf(stderr, "ipg[%d] %% INOPB[%d] != 0\n",
2162 (int) oldsb->fs_ipg, (int) INOPB(oldsb));
2163 exit(EXIT_FAILURE);
2164 }
2165 /* The superblock is bigger than struct fs (there are trailing
2166 * tables, of non-fixed size); make sure we copy the whole
2167 * thing. SBLOCKSIZE may be an over-estimate, but we do this
2168 * just once, so being generous is cheap. */
2169 memcpy(newsb, oldsb, SBLOCKSIZE);
2170 loadcgs();
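/*
 * Both sizes are compared in DEV_BSIZE sectors.  If the new size
 * equals the old size, neither grow() nor shrink() is called and
 * the run is essentially a no-op apart from rewriting the summary
 * information and superblocks.
 */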
2171 if (newsize > fsbtodb(oldsb, oldsb->fs_size)) {
2172 grow();
2173 } else if (newsize < fsbtodb(oldsb, oldsb->fs_size)) {
2174 if (is_ufs2)
2175 errx(EXIT_FAILURE,"shrinking not supported for ufs2");
2176 shrink();
2177 }
2178 flush_cgs();
2179 write_sbs();
2180 if (isplainfile() && ftruncate(fd, newsize * DEV_BSIZE) == -1)
2181 err(EXIT_FAILURE, "Can't truncate `%s'", special);
2182 return 0;
2183 }
2184
2185 static void
2186 usage(void)
2187 {
2188
2189 (void)fprintf(stderr, "usage: %s [-y] [-s size] special\n",
2190 getprogname());
2191 exit(EXIT_FAILURE);
2192 }
2193