/*	$NetBSD: ffs_alloc.c,v 1.7 2002/01/08 06:00:14 lukem Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#include <sys/cdefs.h>
#ifndef __lint
__RCSID("$NetBSD: ffs_alloc.c,v 1.7 2002/01/08 06:00:14 lukem Exp $");
#endif	/* !__lint */

#include <sys/param.h>
#include <sys/time.h>

#include <err.h>
#include <errno.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ffs/fs.h>

#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"


static int scanc(u_int, const u_char *, const u_char *, int);

static ufs_daddr_t ffs_alloccg(struct inode *, int, ufs_daddr_t, int);
static ufs_daddr_t ffs_alloccgblk(struct inode *, struct buf *, ufs_daddr_t);
static u_long ffs_hashalloc(struct inode *, int, long, int,
            ufs_daddr_t (*)(struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs_daddr_t, int);

/* in ffs_tables.c */
extern const int inside[], around[];
extern const u_char * const fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified.  If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, ufs_daddr_t lbn, ufs_daddr_t bpref, int size,
    ufs_daddr_t *bnp)
{
        struct fs *fs = ip->i_fs;
        ufs_daddr_t bno;
        int cg;

        *bnp = 0;
        if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
                errx(1, "ffs_alloc: bad size: bsize %d size %d",
                    fs->fs_bsize, size);
        }
        if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
                goto nospace;
        if (bpref >= fs->fs_size)
                bpref = 0;
        if (bpref == 0)
                cg = ino_to_cg(fs, ip->i_number);
        else
                cg = dtog(fs, bpref);
        bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
            ffs_alloccg);
        if (bno > 0) {
                ip->i_ffs_blocks += btodb(size);
                *bnp = bno;
                return (0);
        }
nospace:
        return (ENOSPC);
}
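
/*
 * Illustrative sketch (not part of the original source): a caller laying
 * out logical block "lbn" of an inode would typically derive a preference
 * from ffs_blkpref() and then ask ffs_alloc() for a full block, roughly:
 *
 *	ufs_daddr_t pref, bno;
 *
 *	pref = ffs_blkpref(ip, lbn, indx, bap);    (bap: block-pointer array)
 *	if (ffs_alloc(ip, lbn, pref, fs->fs_bsize, &bno) != 0)
 *		errx(1, "ffs_alloc: out of space");
 *	bap[indx] = bno;
 *
 * The names lbn, indx and bap above are placeholders for whatever the
 * block-mapping caller actually uses.
 */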

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections.  The first section is composed of the
 * direct blocks.  Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file.  If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups.  When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made.  The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found.  If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
ufs_daddr_t
ffs_blkpref(struct inode *ip, ufs_daddr_t lbn, int indx, ufs_daddr_t *bap)
{
        struct fs *fs;
        int cg;
        int avgbfree, startcg;
        ufs_daddr_t nextblk;

        fs = ip->i_fs;
        if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
                if (lbn < NDADDR + NINDIR(fs)) {
                        cg = ino_to_cg(fs, ip->i_number);
                        return (fs->fs_fpg * cg + fs->fs_frag);
                }
                /*
                 * Find a cylinder with greater than average number of
                 * unused data blocks.
                 */
                if (indx == 0 || bap[indx - 1] == 0)
                        startcg =
                            ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                else
                        startcg = dtog(fs,
                            ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
                startcg %= fs->fs_ncg;
                avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
                for (cg = startcg; cg < fs->fs_ncg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                                fs->fs_cgrotor = cg;
                                return (fs->fs_fpg * cg + fs->fs_frag);
                        }
                for (cg = 0; cg <= startcg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                                fs->fs_cgrotor = cg;
                                return (fs->fs_fpg * cg + fs->fs_frag);
                        }
                return (0);
        }
        /*
         * One or more previous blocks have been laid out.  If less
         * than fs_maxcontig previous blocks are contiguous, the
         * next block is requested contiguously, otherwise it is
         * requested rotationally delayed by fs_rotdelay milliseconds.
         */
        nextblk = ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
        if (indx < fs->fs_maxcontig ||
            ufs_rw32(bap[indx - fs->fs_maxcontig], UFS_FSNEEDSWAP(fs)) +
            blkstofrags(fs, fs->fs_maxcontig) != nextblk)
                return (nextblk);
        if (fs->fs_rotdelay != 0)
                /*
                 * Here we convert ms of delay to frags as:
                 * (frags) = (ms) * (rev/sec) * (sect/rev) /
                 *	((sect/frag) * (ms/sec))
                 * then round up to the next block.
                 */
                nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
                    (NSPF(fs) * 1000), fs->fs_frag);
        return (nextblk);
}
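
/*
 * Worked example of the fs_rotdelay conversion above (illustrative only,
 * with made-up geometry): assume fs_rotdelay = 4 ms, fs_rps = 60 rev/s,
 * fs_nsect = 32 sectors/track, NSPF(fs) = 2 sectors/fragment and
 * fs_frag = 8.  Then
 *
 *	4 * 60 * 32 / (2 * 1000) = 3 fragments (integer division),
 *
 * which roundup() raises to the next block boundary, 8 fragments, so the
 * preferred address is advanced by one full block past the last allocation.
 */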

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':  size for data blocks, mode for inodes
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(struct inode *ip, int cg, long pref, int size,
    ufs_daddr_t (*allocator)(struct inode *, int, ufs_daddr_t, int))
{
        struct fs *fs;
        long result;
        int i, icg = cg;

        fs = ip->i_fs;
        /*
         * 1: preferred cylinder group
         */
        result = (*allocator)(ip, cg, pref, size);
        if (result)
                return (result);
        /*
         * 2: quadratic rehash
         */
        for (i = 1; i < fs->fs_ncg; i *= 2) {
                cg += i;
                if (cg >= fs->fs_ncg)
                        cg -= fs->fs_ncg;
                result = (*allocator)(ip, cg, 0, size);
                if (result)
                        return (result);
        }
        /*
         * 3: brute force search
         * Note that we start at i == 2, since 0 was checked initially,
         * and 1 is always checked in the quadratic rehash.
         */
        cg = (icg + 2) % fs->fs_ncg;
        for (i = 2; i < fs->fs_ncg; i++) {
                result = (*allocator)(ip, cg, 0, size);
                if (result)
                        return (result);
                cg++;
                if (cg == fs->fs_ncg)
                        cg = 0;
        }
        return (0);
}
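
/*
 * Illustrative probe sequence for the hashed search above (not in the
 * original source): with fs_ncg == 16 and a starting group icg == 5, the
 * quadratic rehash visits groups 6, 8, 12 and 4 (icg + 1, + 3, + 7, + 15,
 * modulo 16), and the final brute-force pass then walks groups 7, 8, 9,
 * ..., wrapping around, until every group has been tried once.
 */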

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs_daddr_t
ffs_alloccg(struct inode *ip, int cg, ufs_daddr_t bpref, int size)
{
        struct cg *cgp;
        struct buf *bp;
        ufs_daddr_t bno, blkno;
        int error, frags, allocsiz, i;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
                return (0);
        error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
            (int)fs->fs_cgsize, &bp);
        if (error) {
                brelse(bp);
                return (0);
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp, needswap) ||
            (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
                brelse(bp);
                return (0);
        }
        if (size == fs->fs_bsize) {
                bno = ffs_alloccgblk(ip, bp, bpref);
                bdwrite(bp);
                return (bno);
        }
        /*
         * check to see if any fragments are already available;
         * allocsiz is the size which will be allocated, hacking
         * it down to a smaller size if necessary
         */
        frags = numfrags(fs, size);
        for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
                if (cgp->cg_frsum[allocsiz] != 0)
                        break;
        if (allocsiz == fs->fs_frag) {
                /*
                 * no fragments were available, so a block will be
                 * allocated, and hacked up
                 */
                if (cgp->cg_cs.cs_nbfree == 0) {
                        brelse(bp);
                        return (0);
                }
                bno = ffs_alloccgblk(ip, bp, bpref);
                bpref = dtogd(fs, bno);
                for (i = frags; i < fs->fs_frag; i++)
                        setbit(cg_blksfree(cgp, needswap), bpref + i);
                i = fs->fs_frag - frags;
                ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                fs->fs_fmod = 1;
                ufs_add32(cgp->cg_frsum[i], 1, needswap);
                bdwrite(bp);
                return (bno);
        }
        bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
        for (i = 0; i < frags; i++)
                clrbit(cg_blksfree(cgp, needswap), bno + i);
        ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
        fs->fs_cstotal.cs_nffree -= frags;
        fs->fs_cs(fs, cg).cs_nffree -= frags;
        fs->fs_fmod = 1;
        ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
        if (frags != allocsiz)
                ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
        blkno = cg * fs->fs_fpg + bno;
        bdwrite(bp);
        return blkno;
}
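
/*
 * Illustrative example of the fragment bookkeeping above (not in the
 * original source): with fs_frag == 8, a request for 3 fragments first
 * looks for cg_frsum[3] != 0.  If only a 5-fragment run exists
 * (cg_frsum[5] > 0), allocsiz becomes 5, the first 3 fragments of that
 * run are taken, and the accounting turns the remainder into a 2-fragment
 * run: cg_frsum[5] is decremented and cg_frsum[5 - 3] is incremented,
 * exactly as done at the end of ffs_alloccg().
 */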

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs_daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, ufs_daddr_t bpref)
{
        struct cg *cgp;
        ufs_daddr_t bno, blkno;
        int cylno, pos, delta;
        short *cylbp;
        int i;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        cgp = (struct cg *)bp->b_data;
        if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
                bpref = ufs_rw32(cgp->cg_rotor, needswap);
                goto norot;
        }
        bpref = blknum(fs, bpref);
        bpref = dtogd(fs, bpref);
        /*
         * if the requested block is available, use it
         */
        if (ffs_isblock(fs, cg_blksfree(cgp, needswap),
            fragstoblks(fs, bpref))) {
                bno = bpref;
                goto gotit;
        }
        if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
                /*
                 * Block layout information is not available.
                 * Leaving bpref unchanged means we take the
                 * next available free block following the one
                 * we just allocated.  Hopefully this will at
                 * least hit a track cache on drives of unknown
                 * geometry (e.g. SCSI).
                 */
                goto norot;
        }
        /*
         * check for a block available on the same cylinder
         */
        cylno = cbtocylno(fs, bpref);
        if (cg_blktot(cgp, needswap)[cylno] == 0)
                goto norot;
        /*
         * check the summary information to see if a block is
         * available in the requested cylinder starting at the
         * requested rotational position and proceeding around.
         */
        cylbp = cg_blks(fs, cgp, cylno, needswap);
        pos = cbtorpos(fs, bpref);
        for (i = pos; i < fs->fs_nrpos; i++)
                if (ufs_rw16(cylbp[i], needswap) > 0)
                        break;
        if (i == fs->fs_nrpos)
                for (i = 0; i < pos; i++)
                        if (ufs_rw16(cylbp[i], needswap) > 0)
                                break;
        if (ufs_rw16(cylbp[i], needswap) > 0) {
                /*
                 * found a rotational position, now find the actual
                 * block.  It is a fatal error if none is actually there.
                 */
                pos = cylno % fs->fs_cpc;
                bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
                if (fs_postbl(fs, pos)[i] == -1) {
                        errx(1,
                            "ffs_alloccgblk: cyl groups corrupted: pos %d i %d",
                            pos, i);
                }
                for (i = fs_postbl(fs, pos)[i];; ) {
                        if (ffs_isblock(fs, cg_blksfree(cgp, needswap), bno + i)) {
                                bno = blkstofrags(fs, (bno + i));
                                goto gotit;
                        }
                        delta = fs_rotbl(fs)[i];
                        if (delta <= 0 ||
                            delta + i > fragstoblks(fs, fs->fs_fpg))
                                break;
                        i += delta;
                }
                errx(1, "ffs_alloccgblk: can't find blk in cyl: pos %d i %d",
                    pos, i);
        }
norot:
        /*
         * no blocks in the requested cylinder, so take next
         * available one in this cylinder group.
         */
        bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
        if (bno < 0)
                return (0);
        cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
        blkno = fragstoblks(fs, bno);
        ffs_clrblock(fs, cg_blksfree(cgp, needswap), (long)blkno);
        ffs_clusteracct(fs, cgp, blkno, -1);
        ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
        fs->fs_cstotal.cs_nbfree--;
        fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
        cylno = cbtocylno(fs, bno);
        ufs_add16(cg_blks(fs, cgp, cylno, needswap)[cbtorpos(fs, bno)], -1,
            needswap);
        ufs_add32(cg_blktot(cgp, needswap)[cylno], -1, needswap);
        fs->fs_fmod = 1;
        blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
        return (blkno);
}
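
/*
 * Illustrative note on the rotational-position search above (not in the
 * original source): with fs_nrpos == 8 and a preferred block whose
 * rotational position is cbtorpos(fs, bpref) == 5, the summary counts
 * cylbp[5], cylbp[6], cylbp[7] are examined first and then cylbp[0]
 * through cylbp[4]; the first non-zero entry names a rotational position
 * known to hold a free block in this cylinder, and fs_postbl()/fs_rotbl()
 * are then walked to locate that block within the cylinder group.
 */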

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map.  If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, ufs_daddr_t bno, long size)
{
        struct cg *cgp;
        struct buf *bp;
        ufs_daddr_t blkno;
        int i, error, cg, blk, frags, bbase;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
            fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
                errx(1, "blkfree: bad size: bno %u bsize %d size %ld",
                    bno, fs->fs_bsize, size);
        }
        cg = dtog(fs, bno);
        if ((u_int)bno >= fs->fs_size) {
                warnx("bad block %d, ino %d", bno, ip->i_number);
                return;
        }
        error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
            (int)fs->fs_cgsize, &bp);
        if (error) {
                brelse(bp);
                return;
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp, needswap)) {
                brelse(bp);
                return;
        }
        bno = dtogd(fs, bno);
        if (size == fs->fs_bsize) {
                blkno = fragstoblks(fs, bno);
                if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), blkno)) {
                        errx(1, "blkfree: freeing free block %d", bno);
                }
                ffs_setblock(fs, cg_blksfree(cgp, needswap), blkno);
                ffs_clusteracct(fs, cgp, blkno, 1);
                ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
                fs->fs_cstotal.cs_nbfree++;
                fs->fs_cs(fs, cg).cs_nbfree++;
                i = cbtocylno(fs, bno);
                ufs_add16(cg_blks(fs, cgp, i, needswap)[cbtorpos(fs, bno)], 1,
                    needswap);
                ufs_add32(cg_blktot(cgp, needswap)[i], 1, needswap);
        } else {
                bbase = bno - fragnum(fs, bno);
                /*
                 * decrement the counts associated with the old frags
                 */
                blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
                ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
                /*
                 * deallocate the fragment
                 */
                frags = numfrags(fs, size);
                for (i = 0; i < frags; i++) {
                        if (isset(cg_blksfree(cgp, needswap), bno + i)) {
                                errx(1, "blkfree: freeing free frag: block %d",
                                    bno + i);
                        }
                        setbit(cg_blksfree(cgp, needswap), bno + i);
                }
                ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                /*
                 * add back in counts associated with the new frags
                 */
                blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
                ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
                /*
                 * if a complete block has been reassembled, account for it
                 */
                blkno = fragstoblks(fs, bbase);
                if (ffs_isblock(fs, cg_blksfree(cgp, needswap), blkno)) {
                        ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
                        fs->fs_cstotal.cs_nffree -= fs->fs_frag;
                        fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
                        ffs_clusteracct(fs, cgp, blkno, 1);
                        ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
                        fs->fs_cstotal.cs_nbfree++;
                        fs->fs_cs(fs, cg).cs_nbfree++;
                        i = cbtocylno(fs, bbase);
                        ufs_add16(cg_blks(fs, cgp, i, needswap)[cbtorpos(fs, bbase)],
                            1, needswap);
                        ufs_add32(cg_blktot(cgp, needswap)[i], 1, needswap);
                }
        }
        fs->fs_fmod = 1;
        bdwrite(bp);
}
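
/*
 * Illustrative example of the reassembly accounting above (not in the
 * original source): with fs_frag == 8, suppose the last 2 fragments of a
 * block are freed while the other 6 are already free.  The code first
 * credits cs_nffree with 2, then notices via ffs_isblock() that the whole
 * block is now free, so it takes all 8 fragments back out of cs_nffree,
 * bumps cs_nbfree by one, and updates the cluster and cylinder summaries
 * for the reassembled block.
 */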

/*
 * Scan the "size" bytes at "cp" for the first byte whose entry in
 * "table" has a bit in common with "mask", and return the number of
 * bytes remaining from that byte to the end of the region (0 if no
 * byte matches).  This is a local stand-in for the kernel scanc()
 * primitive used by ffs_mapsearch() below.
 */
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
        const u_char *end = &cp[size];

        while (cp < end && (table[*cp] & mask) == 0)
                cp++;
        return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a fatal error if a request is made to find a block when none
 * are available.
 */
static ufs_daddr_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, ufs_daddr_t bpref, int allocsiz)
{
        ufs_daddr_t bno;
        int start, len, loc, i;
        int blk, field, subfield, pos;
        int ostart, olen;
        const int needswap = UFS_FSNEEDSWAP(fs);

        /*
         * find the fragment by searching through the free block
         * map for an appropriate bit pattern
         */
        if (bpref)
                start = dtogd(fs, bpref) / NBBY;
        else
                start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
        len = howmany(fs->fs_fpg, NBBY) - start;
        ostart = start;
        olen = len;
        loc = scanc((u_int)len,
            (const u_char *)&cg_blksfree(cgp, needswap)[start],
            (const u_char *)fragtbl[fs->fs_frag],
            (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
        if (loc == 0) {
                len = start + 1;
                start = 0;
                loc = scanc((u_int)len,
                    (const u_char *)&cg_blksfree(cgp, needswap)[0],
                    (const u_char *)fragtbl[fs->fs_frag],
                    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
                if (loc == 0) {
                        errx(1,
                            "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
                            ostart, olen,
                            ufs_rw32(cgp->cg_freeoff, needswap),
                            (long)cg_blksfree(cgp, needswap) - (long)cgp);
                        /* NOTREACHED */
                }
        }
        bno = (start + len - loc) * NBBY;
        cgp->cg_frotor = ufs_rw32(bno, needswap);
        /*
         * found the byte in the map
         * sift through the bits to find the selected frag
         */
        for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
                blk = blkmap(fs, cg_blksfree(cgp, needswap), bno);
                blk <<= 1;
                field = around[allocsiz];
                subfield = inside[allocsiz];
                for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
                        if ((blk & field) == subfield)
                                return (bno + pos);
                        field <<= 1;
                        subfield <<= 1;
                }
        }
        errx(1, "ffs_alloccg: block not in map: bno %d", bno);
        return (-1);
}
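
/*
 * Illustrative note on the search above (not in the original source, and
 * a simplified description): fragtbl[fs_frag] maps every possible free-map
 * byte to a small bitmask describing which free-fragment run lengths occur
 * within that byte, so scanc() can skip whole bytes that cannot satisfy
 * the request.  Once a candidate byte is found, the inner loop re-reads
 * the free map with blkmap() and slides the around[]/inside[] window one
 * bit at a time to find the exact starting fragment of a free run of the
 * chosen size "allocsiz".
 */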

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, ufs_daddr_t blkno, int cnt)
{
        int32_t *sump;
        int32_t *lp;
        u_char *freemapp, *mapp;
        int i, start, end, forw, back, map, bit;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (fs->fs_contigsumsize <= 0)
                return;
        freemapp = cg_clustersfree(cgp, needswap);
        sump = cg_clustersum(cgp, needswap);
        /*
         * Allocate or clear the actual block.
         */
        if (cnt > 0)
                setbit(freemapp, blkno);
        else
                clrbit(freemapp, blkno);
        /*
         * Find the size of the cluster going forward.
         */
        start = blkno + 1;
        end = start + fs->fs_contigsumsize;
        if (end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
                end = ufs_rw32(cgp->cg_nclusterblks, needswap);
        mapp = &freemapp[start / NBBY];
        map = *mapp++;
        bit = 1 << (start % NBBY);
        for (i = start; i < end; i++) {
                if ((map & bit) == 0)
                        break;
                if ((i & (NBBY - 1)) != (NBBY - 1)) {
                        bit <<= 1;
                } else {
                        map = *mapp++;
                        bit = 1;
                }
        }
        forw = i - start;
        /*
         * Find the size of the cluster going backward.
         */
        start = blkno - 1;
        end = start - fs->fs_contigsumsize;
        if (end < 0)
                end = -1;
        mapp = &freemapp[start / NBBY];
        map = *mapp--;
        bit = 1 << (start % NBBY);
        for (i = start; i > end; i--) {
                if ((map & bit) == 0)
                        break;
                if ((i & (NBBY - 1)) != 0) {
                        bit >>= 1;
                } else {
                        map = *mapp--;
                        bit = 1 << (NBBY - 1);
                }
        }
        back = start - i;
        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        i = back + forw + 1;
        if (i > fs->fs_contigsumsize)
                i = fs->fs_contigsumsize;
        ufs_add32(sump[i], cnt, needswap);
        if (back > 0)
                ufs_add32(sump[back], -cnt, needswap);
        if (forw > 0)
                ufs_add32(sump[forw], -cnt, needswap);

        /*
         * Update cluster summary information.
         */
        lp = &sump[fs->fs_contigsumsize];
        for (i = fs->fs_contigsumsize; i > 0; i--)
                if (ufs_rw32(*lp--, needswap) > 0)
                        break;
        fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}
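
/*
 * Illustrative example of the cluster accounting above (not in the
 * original source): suppose block "blkno" is being freed (cnt == 1), the
 * two blocks before it are already free and the one after it is free as
 * well.  Then back == 2 and forw == 1, so the new cluster has length
 * back + forw + 1 == 4 (capped at fs_contigsumsize): sump[4] is
 * incremented while sump[2] and sump[1] are decremented, since the old
 * 2-block and 1-block clusters have been merged into the new one.
 */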