
Lines matching refs:fs

62 #include <ufs/ffs/fs.h>
75 static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);
104 struct fs *fs = ip->i_fs;
109 if (size > fs->fs_bsize || ffs_fragoff(fs, size) != 0) {
111 fs->fs_bsize, size);
113 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
115 if (bpref >= fs->fs_size)
118 cg = ino_to_cg(fs, ip->i_number);
120 cg = dtog(fs, bpref);
161 struct fs *fs;
165 fs = ip->i_fs;
166 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
167 if (lbn < UFS_NDADDR + FFS_NINDIR(fs)) {
168 cg = ino_to_cg(fs, ip->i_number);
169 return (fs->fs_fpg * cg + fs->fs_frag);
177 ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
179 startcg = dtog(fs,
180 ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
181 startcg %= fs->fs_ncg;
182 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
183 for (cg = startcg; cg < fs->fs_ncg; cg++)
184 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
185 return (fs->fs_fpg * cg + fs->fs_frag);
187 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
188 return (fs->fs_fpg * cg + fs->fs_frag);
194 return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
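The matches above come from ffs_blkpref_ufs1, the block-preference heuristic: at the start of a new run of blocks (indx % fs_maxbpg == 0, or no previous block) it looks for a cylinder group holding at least the filesystem-wide average number of free blocks, and otherwise it simply continues one block past the previous allocation (bap[indx - 1] + fs_frag). The following is a minimal sketch of that average-based scan, with simplified stand-in fields in place of the real struct fs and the fs_cs() macro:

/*
 * Toy sketch of the average-free-blocks scan in ffs_blkpref_ufs1.
 * Types, field names, and the wrap-around policy are illustrative only.
 */
#include <stdint.h>

struct toy_fs {
	uint32_t ncg;			/* number of cylinder groups */
	uint32_t fpg;			/* fragments per cylinder group */
	uint32_t frag;			/* fragments per block */
	int64_t  total_nbfree;		/* free blocks, filesystem-wide */
	const int64_t *cg_nbfree;	/* free blocks per cylinder group */
};

/* Return a preferred fragment address, or -1 if no group beats the average. */
static int64_t
toy_blkpref(const struct toy_fs *fs, uint32_t startcg)
{
	int64_t avgbfree = fs->total_nbfree / fs->ncg;
	uint32_t cg;

	for (cg = startcg; cg < fs->ncg; cg++)	/* preferred group to the end */
		if (fs->cg_nbfree[cg] >= avgbfree)
			return ((int64_t)fs->fpg * cg + fs->frag);
	for (cg = 0; cg < startcg; cg++)	/* then wrap around */
		if (fs->cg_nbfree[cg] >= avgbfree)
			return ((int64_t)fs->fpg * cg + fs->frag);
	return (-1);
}
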
200 struct fs *fs;
204 fs = ip->i_fs;
205 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
206 if (lbn < UFS_NDADDR + FFS_NINDIR(fs)) {
207 cg = ino_to_cg(fs, ip->i_number);
208 return (fs->fs_fpg * cg + fs->fs_frag);
216 ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
218 startcg = dtog(fs,
219 ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
220 startcg %= fs->fs_ncg;
221 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
222 for (cg = startcg; cg < fs->fs_ncg; cg++)
223 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
224 return (fs->fs_fpg * cg + fs->fs_frag);
227 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
228 return (fs->fs_fpg * cg + fs->fs_frag);
235 return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
253 struct fs *fs;
257 fs = ip->i_fs;
267 for (i = 1; i < fs->fs_ncg; i *= 2) {
269 if (cg >= fs->fs_ncg)
270 cg -= fs->fs_ncg;
280 cg = (icg + 2) % fs->fs_ncg;
281 for (i = 2; i < fs->fs_ncg; i++) {
286 if (cg == fs->fs_ncg)
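These matches show the cylinder-group probing order used when the preferred group cannot satisfy an allocation (the ffs_hashalloc-style search): try the preferred group, rehash with doubling steps (1, 2, 4, ...), then fall back to a brute-force sweep starting two groups past the original one. A self-contained sketch of that order, with a hypothetical allocator callback standing in for the per-group allocator such as ffs_alloccg:

/*
 * Sketch of the hashed cylinder-group search pattern. The callback and
 * its context argument are stand-ins, not the real makefs interfaces.
 */
#include <stdint.h>

typedef int64_t (*toy_allocator)(void *ctx, uint32_t cg);

static int64_t
toy_hashalloc(void *ctx, uint32_t ncg, uint32_t icg, toy_allocator allocate)
{
	uint32_t cg, i;
	int64_t result;

	/* 1: try the preferred cylinder group. */
	if ((result = allocate(ctx, icg)) != 0)
		return (result);
	/* 2: rehash with doubling increments, wrapping past the last group. */
	cg = icg;
	for (i = 1; i < ncg; i *= 2) {
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		if ((result = allocate(ctx, cg)) != 0)
			return (result);
	}
	/* 3: brute-force sweep starting just past the preferred group. */
	cg = (icg + 2) % ncg;
	for (i = 2; i < ncg; i++) {
		if ((result = allocate(ctx, cg)) != 0)
			return (result);
		cg++;
		if (cg == ncg)
			cg = 0;
	}
	return (0);
}
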
305 struct fs *fs = ip->i_fs;
306 const int needswap = UFS_FSNEEDSWAP(fs);
308 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
310 error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
311 (int)fs->fs_cgsize, 0, &bp);
317 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
321 if (size == fs->fs_bsize) {
331 frags = ffs_numfrags(fs, size);
332 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
335 if (allocsiz == fs->fs_frag) {
345 bpref = dtogd(fs, bno);
346 for (i = frags; i < fs->fs_frag; i++)
348 i = fs->fs_frag - frags;
350 fs->fs_cstotal.cs_nffree += i;
351 fs->fs_cs(fs, cg).cs_nffree += i;
352 fs->fs_fmod = 1;
357 bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
361 fs->fs_cstotal.cs_nffree -= frags;
362 fs->fs_cs(fs, cg).cs_nffree -= frags;
363 fs->fs_fmod = 1;
367 blkno = cg * fs->fs_fpg + bno;
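The ffs_alloccg matches above cover the fragment path: convert the request to a fragment count, scan cg_frsum[] upward for the smallest free run that fits, locate it with ffs_mapsearch, and adjust the per-group and filesystem-wide free-fragment counters, crediting any unused tail of the run back as a smaller run. A sketch of just that frsum bookkeeping, assuming a simplified group descriptor with at most 8 fragments per block:

/*
 * Toy version of the best-fit run selection and accounting in the
 * ffs_alloccg fragment path. Locating the run in the free map
 * (ffs_mapsearch) is left out.
 */
#include <stdint.h>

struct toy_cg {
	uint32_t frag;		/* fragments per block (<= 8 here) */
	uint32_t frsum[8];	/* frsum[i]: free runs of exactly i fragments */
	int64_t  nffree;	/* free fragments in this group */
};

/* Return the run size to carve from, or 0 if only whole blocks remain. */
static uint32_t
toy_pick_run(struct toy_cg *cgp, uint32_t frags)
{
	uint32_t allocsiz;

	for (allocsiz = frags; allocsiz < cgp->frag; allocsiz++)
		if (cgp->frsum[allocsiz] != 0)
			break;
	if (allocsiz == cgp->frag)
		return (0);	/* no run fits; caller must split a full block */

	/* Consume the run and re-account the unused tail as a smaller run. */
	cgp->frsum[allocsiz]--;
	if (allocsiz > frags)
		cgp->frsum[allocsiz - frags]++;
	cgp->nffree -= frags;
	return (allocsiz);
}
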
389 struct fs *fs = ip->i_fs;
390 const int needswap = UFS_FSNEEDSWAP(fs);
395 if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
398 bpref = ffs_blknum(fs, bpref);
399 bno = dtogd(fs, bpref);
403 if (ffs_isblock(fs, blksfree, ffs_fragstoblks(fs, bno)))
409 bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
414 blkno = ffs_fragstoblks(fs, bno);
415 ffs_clrblock(fs, blksfree, (long)blkno);
416 ffs_clusteracct(fs, cgp, blkno, -1);
418 fs->fs_cstotal.cs_nbfree--;
419 fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
420 fs->fs_fmod = 1;
421 blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
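These matches are ffs_alloccgblk, which hands out a whole block: round the preference down to a block boundary (ffs_blknum), take that block if its bit is still set in the free map (ffs_isblock), otherwise fall back to ffs_mapsearch, then clear the bit, update the cluster accounting, and decrement the free-block counters. A toy version of that flow, with simplified bitmap helpers in place of the real ffs_isblock/ffs_clrblock macros, a linear scan in place of ffs_mapsearch, and cluster accounting omitted:

/*
 * Sketch of whole-block allocation within one cylinder group.
 * blksfree has one bit per block (1 = free); layout is simplified.
 */
#include <stdint.h>

#define TOY_NBBY	8

static int
toy_isset(const uint8_t *map, int64_t bit)
{
	return ((map[bit / TOY_NBBY] >> (bit % TOY_NBBY)) & 1);
}

static void
toy_clear(uint8_t *map, int64_t bit)
{
	map[bit / TOY_NBBY] &= ~(1u << (bit % TOY_NBBY));
}

/* Returns the allocated block number within the group, or -1 if full. */
static int64_t
toy_alloccgblk(uint8_t *blksfree, int64_t nblk, int64_t prefblk,
    int64_t *cg_nbfree, int64_t *total_nbfree)
{
	int64_t blkno = -1, i;

	if (prefblk >= 0 && prefblk < nblk && toy_isset(blksfree, prefblk))
		blkno = prefblk;			/* preferred block is free */
	else
		for (i = 0; i < nblk; i++)		/* stand-in for ffs_mapsearch() */
			if (toy_isset(blksfree, i)) {
				blkno = i;
				break;
			}
	if (blkno < 0)
		return (-1);
	toy_clear(blksfree, blkno);
	(*cg_nbfree)--;
	(*total_nbfree)--;
	return (blkno);
}
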
439 struct fs *fs = ip->i_fs;
440 const int needswap = UFS_FSNEEDSWAP(fs);
442 if (size > fs->fs_bsize || ffs_fragoff(fs, size) != 0 ||
443 ffs_fragnum(fs, bno) + ffs_numfrags(fs, size) > fs->fs_frag) {
445 "size %ld", __func__, (long long)bno, fs->fs_bsize, size);
447 cg = dtog(fs, bno);
448 if (bno >= fs->fs_size) {
453 error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
454 (int)fs->fs_cgsize, 0, &bp);
463 cgbno = dtogd(fs, bno);
464 if (size == fs->fs_bsize) {
465 fragno = ffs_fragstoblks(fs, cgbno);
466 if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), fragno)) {
470 ffs_setblock(fs, cg_blksfree(cgp, needswap), fragno);
471 ffs_clusteracct(fs, cgp, fragno, 1);
473 fs->fs_cstotal.cs_nbfree++;
474 fs->fs_cs(fs, cg).cs_nbfree++;
476 bbase = cgbno - ffs_fragnum(fs, cgbno);
480 blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
481 ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
485 frags = ffs_numfrags(fs, size);
495 fs->fs_cstotal.cs_nffree += i;
496 fs->fs_cs(fs, cg).cs_nffree += i;
500 blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
501 ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
505 fragno = ffs_fragstoblks(fs, bbase);
506 if (ffs_isblock(fs, cg_blksfree(cgp, needswap), fragno)) {
507 ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
508 fs->fs_cstotal.cs_nffree -= fs->fs_frag;
509 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
510 ffs_clusteracct(fs, cgp, fragno, 1);
512 fs->fs_cstotal.cs_nbfree++;
513 fs->fs_cs(fs, cg).cs_nbfree++;
516 fs->fs_fmod = 1;
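The ffs_blkfree matches show the two return paths: freeing a full block sets its bit and bumps the block counters, while freeing fragments sets the individual fragment bits, recomputes the fragment-run sums around the block base (ffs_fragacct), and, if every fragment of that block is then free, converts the fragment credit into one more free block. A sketch of the coalescing step under a simplified one-byte-per-fragment free map:

/*
 * Toy sketch of fragment freeing and block coalescing in ffs_blkfree.
 * The per-byte free map and field names are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_group {
	uint32_t frag;		/* fragments per block */
	uint8_t *fragfree;	/* one byte per fragment: 1 = free */
	int64_t  nffree;	/* free fragments in the group */
	int64_t  nbfree;	/* free blocks in the group */
};

static void
toy_blkfree_frags(struct toy_group *g, int64_t cgbno, uint32_t frags)
{
	int64_t bbase = cgbno - (cgbno % g->frag);	/* first fragment of the block */
	uint32_t i;
	bool whole = true;

	for (i = 0; i < frags; i++) {			/* mark the freed fragments */
		g->fragfree[cgbno + i] = 1;
		g->nffree++;
	}
	for (i = 0; i < g->frag; i++)			/* is the block whole now? */
		if (!g->fragfree[bbase + i])
			whole = false;
	if (whole) {
		g->nffree -= g->frag;			/* fragment credit -> block credit */
		g->nbfree++;
	}
}
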
538 ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
544 const int needswap = UFS_FSNEEDSWAP(fs);
551 start = dtogd(fs, bpref) / NBBY;
554 len = howmany(fs->fs_fpg, NBBY) - start;
559 (const u_char *)fragtbl[fs->fs_frag],
560 (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
566 (const u_char *)fragtbl[fs->fs_frag],
567 (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
582 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
583 blk = blkmap(fs, cg_blksfree(cgp, needswap), bno);
587 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
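Finally, ffs_mapsearch looks for a run of allocsiz free fragments: a table-driven scan over the free map (fragtbl with scanc) narrows the search to a candidate byte, and the loop in the last matches checks each block's bit pattern (blkmap) for a long enough run. The sketch below implements only that inner per-block check, without the fragtbl/scanc fast path; the bit layout is a simplifying assumption:

/*
 * Toy version of the per-block run check at the end of ffs_mapsearch.
 * blk holds one bit per fragment of a single block, low bit first,
 * with 1 meaning free; fs_frag is assumed to be at most 8.
 */
#include <stdint.h>

static int
toy_fragrun(uint32_t blk, int fs_frag, int allocsiz)
{
	int pos, i;

	for (pos = 0; pos <= fs_frag - allocsiz; pos++) {
		for (i = 0; i < allocsiz; i++)
			if ((blk & (1u << (pos + i))) == 0)
				break;
		if (i == allocsiz)
			return (pos);	/* run of allocsiz free fragments at pos */
	}
	return (-1);			/* no suitable run in this block */
}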