/*	$NetBSD: ffs_alloc.c,v 1.33 2025/06/27 19:55:38 andvar Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#if HAVE_NBTOOL_CONFIG_H
#include "nbtool_config.h"
#endif

#include <sys/cdefs.h>
#if defined(__RCSID) && !defined(__lint)
__RCSID("$NetBSD: ffs_alloc.c,v 1.33 2025/06/27 19:55:38 andvar Exp $");
#endif	/* !__lint */

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ffs/fs.h>

#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"


static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, uint32_t, daddr_t, int,
		    daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/* in ffs_tables.c */
extern const int inside[], around[];
extern const u_char * const fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || ffs_fragoff(fs, size) != 0) {
		errx(EXIT_FAILURE, "%s: bad size: bsize %d size %d", __func__,
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		DIP_ADD(ip, blocks, size / DEV_BSIZE);
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}

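/*
 * Illustrative example (editorial addition, not part of the original
 * source): with fs_fsize = 2048 and fs_bsize = 16384 (fs_frag = 8),
 * the legal `size' arguments accepted above are the multiples of 2048
 * up to 16384; a request such as 3000 fails the ffs_fragoff() check
 * and exits via errx().
 */
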
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is
 * unavailable; here a best guess is made based upon the logical block
 * number being allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	uint32_t cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + FFS_NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	uint32_t cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + FFS_NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

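/*
 * Editorial note (illustrative, not part of the original source): apart
 * from the wrap-around scan bound as written (`cg <= startcg' in the
 * ufs1 variant vs. `cg < startcg' in the ufs2 variant), the two
 * functions above implement the same placement policy and differ only
 * in the width of the on-disk block pointers they read (int32_t vs.
 * int64_t) and the matching byte-swap helper (ufs_rw32() vs.
 * ufs_rw64()).
 */
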
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, uint32_t cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs;
	daddr_t result;
	uint32_t i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

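/*
 * Worked example (editorial addition, not part of the original
 * source): with fs_ncg = 16 and a starting group icg = 5, the
 * quadratic rehash above probes groups 6, 8, 12 and 4 (offsets 1, 2,
 * 4, 8 from icg, modulo fs_ncg), and the brute-force pass then sweeps
 * 7, 8, ..., 15, 0, ..., 4, so every cylinder group is tried at least
 * once before the allocation fails.
 */
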
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, 0, &bp);
	if (error) {
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp, 0);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = ffs_numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp, 0);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}

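/*
 * Worked example (editorial addition, not part of the original
 * source): with fs_frag = 8, a 3-fragment request first scans
 * cg_frsum[3..7] for the smallest existing run that fits.  If a run
 * of 5 is found, 3 fragments are taken from its front, cg_frsum[5] is
 * decremented, and cg_frsum[2] is incremented for the 2-fragment
 * remainder.  If no run fits, a whole block is split instead and the
 * 5 leftover fragments are recorded in cg_frsum[5].
 */
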
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree;

	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp, needswap);
	if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = ffs_blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree, ffs_fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = ffs_fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map.  If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || ffs_fragoff(fs, size) != 0 ||
	    ffs_fragnum(fs, bno) + ffs_numfrags(fs, size) > fs->fs_frag) {
		errx(EXIT_FAILURE, "%s: bad size: bno %lld bsize %d "
		    "size %ld", __func__, (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %llu", (long long)bno,
		    (unsigned long long)ip->i_number);
		return;
	}
	error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, 0, &bp);
	if (error) {
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap)) {
		brelse(bp, 0);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = ffs_fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			errx(EXIT_FAILURE, "%s: freeing free block %lld",
			    __func__, (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - ffs_fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = ffs_numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp, needswap), cgbno + i)) {
				errx(EXIT_FAILURE, "%s: freeing free frag: "
				    "block %lld", __func__,
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = ffs_fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag,
			    needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

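/*
 * Worked example (editorial addition, not part of the original
 * source): with fs_frag = 8, freeing the last 2 fragments of a block
 * whose other 6 fragments are already free first adds 2 to the nffree
 * counts, then detects the reassembled block and converts it: nffree
 * drops by fs_frag (8) and nbfree gains one, for a net effect of 6
 * fewer free fragments and one more free block.
 */
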
/*
 * Scan a byte string for the first byte whose table entry has any of
 * the bits in `mask' set; return the count of bytes remaining from
 * that byte to the end of the string (0 if no byte matched).
 */
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
	    (const u_char *)&cg_blksfree(cgp, needswap)[start],
	    (const u_char *)fragtbl[fs->fs_frag],
	    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
		    (const u_char *)&cg_blksfree(cgp, needswap)[0],
		    (const u_char *)fragtbl[fs->fs_frag],
		    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(EXIT_FAILURE, "%s: map corrupted: start %d "
			    "len %d offset %d %ld", __func__, ostart, olen,
			    ufs_rw32(cgp->cg_freeoff, needswap),
			    (long)cg_blksfree(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(EXIT_FAILURE, "%s: block not in map: bno %lld", __func__,
	    (long long)bno);
	return (-1);
}