/*	$NetBSD: lfs_balloc.c,v 1.53 2005/04/14 00:44:17 perseant Exp $	*/
2
3 /*-
4 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Konrad E. Schroder <perseant (at) hhhh.org>.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38 /*
39 * Copyright (c) 1989, 1991, 1993
40 * The Regents of the University of California. All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)lfs_balloc.c 8.4 (Berkeley) 5/8/95
67 */
68
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: lfs_balloc.c,v 1.53 2005/04/14 00:44:17 perseant Exp $");
71
72 #if defined(_KERNEL_OPT)
73 #include "opt_quota.h"
74 #endif
75
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/buf.h>
79 #include <sys/proc.h>
80 #include <sys/vnode.h>
81 #include <sys/mount.h>
82 #include <sys/resourcevar.h>
83 #include <sys/trace.h>
84 #include <sys/malloc.h>
85
86 #include <miscfs/specfs/specdev.h>
87
88 #include <ufs/ufs/quota.h>
89 #include <ufs/ufs/inode.h>
90 #include <ufs/ufs/ufsmount.h>
91 #include <ufs/ufs/ufs_extern.h>
92
93 #include <ufs/lfs/lfs.h>
94 #include <ufs/lfs/lfs_extern.h>
95
96 #include <uvm/uvm.h>
97
98 int lfs_fragextend(struct vnode *, int, int, daddr_t, struct buf **, struct ucred *);
99
100 u_int64_t locked_fakequeue_count;
101 extern int lfs_blist_hw;
102 extern int lfs_blist_total; /* # entries in hash table */
103 extern int lfs_blist_maxdepth; /* Hash max depth */
104 extern LIST_HEAD(, lbnentry) *lfs_blist;
105
/*
 * Allocate a block, and do inode and filesystem block accounting for it
 * and for any indirect blocks that may need to be created in order for
 * this block to be created.
110 *
111 * Blocks which have never been accounted for (i.e., which "do not exist")
112 * have disk address 0, which is translated by ufs_bmap to the special value
113 * UNASSIGNED == -1, as in the historical UFS.
114 *
115 * Blocks which have been accounted for but which have not yet been written
116 * to disk are given the new special disk address UNWRITTEN == -2, so that
117 * they can be differentiated from completely new blocks.
118 */
119 /* VOP_BWRITE NIADDR+2 times */
/* VOP_BWRITE NIADDR+2 times */
int
lfs_balloc(void *v)
{
	struct vop_balloc_args /* {
		struct vnode *a_vp;
		off_t a_startoffset;
		int a_size;
		struct ucred *a_cred;
		int a_flags;
		struct buf *a_bpp;
	} */ *ap = v;
	struct vnode *vp;
	int offset;
	u_long iosize;
	daddr_t daddr, idaddr;
	struct buf *ibp, *bp, **bpp;
	struct inode *ip;
	struct lfs *fs;
	struct indir indirs[NIADDR+2], *idp;
	daddr_t lbn, lastblock;
	int bb, bcount;
	int error, frags, i, nsize, osize, num;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_lfs;
	/* Byte offset within the block, and size of this I/O. */
	offset = blkoff(fs, ap->a_startoffset);
	iosize = ap->a_size;
	KASSERT(iosize <= fs->lfs_bsize);
	/* Logical block number being allocated. */
	lbn = lblkno(fs, ap->a_startoffset);
	/* (void)lfs_check(vp, lbn, 0); */
	bpp = ap->a_bpp;

	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * Three cases: it's a block beyond the end of file, it's a block in
	 * the file that may or may not have been assigned a disk address or
	 * we're writing an entire block.
	 *
	 * Note, if the daddr is UNWRITTEN, the block already exists in
	 * the cache (it was read or written earlier).	If so, make sure
	 * we don't count it as a new block or zero out its contents. If
	 * it did not, make sure we allocate any necessary indirect
	 * blocks.
	 *
	 * If we are writing a block beyond the end of the file, we need to
	 * check if the old last block was a fragment.	If it was, we need
	 * to rewrite it.
	 */

	if (bpp)
		*bpp = NULL;

	/* Check for block beyond end of file and fragment extension needed. */
	lastblock = lblkno(fs, ip->i_size);
	if (lastblock < NDADDR && lastblock < lbn) {
		/*
		 * Writing past the old last block: if that block was a
		 * fragment, promote it to a full block first, then round
		 * the file size up to a whole-block boundary.
		 */
		osize = blksize(fs, ip, lastblock);
		if (osize < fs->lfs_bsize && osize > 0) {
			if ((error = lfs_fragextend(vp, osize, fs->lfs_bsize,
						    lastblock,
						    (bpp ? &bp : NULL),
						    ap->a_cred)))
				return (error);
			ip->i_ffs1_size = ip->i_size =
			    (lastblock + 1) * fs->lfs_bsize;
			uvm_vnp_setsize(vp, ip->i_size);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp)
				(void) VOP_BWRITE(bp);
		}
	}

	/*
	 * If the block we are writing is a direct block, it's the last
	 * block in the file, and offset + iosize is less than a full
	 * block, we can write one or more fragments.  There are two cases:
	 * the block is brand new and we should allocate it the correct
	 * size or it already exists and contains some fragments and
	 * may need to extend it.
	 */
	if (lbn < NDADDR && lblkno(fs, ip->i_size) <= lbn) {
		osize = blksize(fs, ip, lbn);
		nsize = fragroundup(fs, offset + iosize);
		if (lblktosize(fs, lbn) >= ip->i_size) {
			/* Brand new block or fragment */
			frags = numfrags(fs, nsize);
			bb = fragstofsb(fs, frags);
			/* Fail before assigning anything if there's no room. */
			if (!ISSPACE(fs, bb, ap->a_cred))
				return ENOSPC;
			if (bpp) {
				*ap->a_bpp = bp = getblk(vp, lbn, nsize, 0, 0);
				bp->b_blkno = UNWRITTEN;
				if (ap->a_flags & B_CLRBUF)
					clrbuf(bp);
			}
			/* Account the new fragment(s) against the fs. */
			ip->i_lfs_effnblks += bb;
			simple_lock(&fs->lfs_interlock);
			fs->lfs_bfree -= bb;
			simple_unlock(&fs->lfs_interlock);
			ip->i_ffs1_db[lbn] = UNWRITTEN;
		} else {
			if (nsize <= osize) {
				/* No need to extend */
				if (bpp && (error = bread(vp, lbn, osize, NOCRED, &bp)))
					return error;
			} else {
				/* Extend existing block */
				if ((error =
				     lfs_fragextend(vp, osize, nsize, lbn,
						    (bpp ? &bp : NULL),
						    ap->a_cred)))
					return error;
			}
			if (bpp)
				*bpp = bp;
		}
		/* Fragment cases are fully handled; done. */
		return 0;
	}

	/*
	 * Full-sized block: look up the current disk address and the
	 * chain of indirect blocks leading to it.
	 */
	error = ufs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL, NULL);
	if (error)
		return (error);

	daddr = (daddr_t)((int32_t)daddr); /* XXX ondisk32 */
	KASSERT(daddr <= LFS_MAX_DADDR);

	/*
	 * Do byte accounting all at once, so we can gracefully fail *before*
	 * we start assigning blocks.
	 */
	bb = VFSTOUFS(vp->v_mount)->um_seqinc;
	bcount = 0;
	if (daddr == UNASSIGNED) {
		/* The data block itself is new. */
		bcount = bb;
	}
	/* Plus one block for each indirect block that must be created. */
	for (i = 1; i < num; ++i) {
		if (!indirs[i].in_exists) {
			bcount += bb;
		}
	}
	if (ISSPACE(fs, bcount, ap->a_cred)) {
		simple_lock(&fs->lfs_interlock);
		fs->lfs_bfree -= bcount;
		simple_unlock(&fs->lfs_interlock);
		ip->i_lfs_effnblks += bcount;
	} else {
		return ENOSPC;
	}

	if (daddr == UNASSIGNED) {
		/*
		 * Mark the top-level indirect pointer UNWRITTEN if it was
		 * previously unallocated.
		 */
		if (num > 0 && ip->i_ffs1_ib[indirs[0].in_off] == 0) {
			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
		}

		/*
		 * Create new indirect blocks if necessary
		 */
		if (num > 1) {
			idaddr = ip->i_ffs1_ib[indirs[0].in_off];
			for (i = 1; i < num; ++i) {
				ibp = getblk(vp, indirs[i].in_lbn,
				    fs->lfs_bsize, 0,0);
				if (!indirs[i].in_exists) {
					/* Brand new indirect block. */
					clrbuf(ibp);
					ibp->b_blkno = UNWRITTEN;
				} else if (!(ibp->b_flags & (B_DELWRI | B_DONE))) {
					/* Exists on disk but not in cache: read it. */
					ibp->b_blkno = fsbtodb(fs, idaddr);
					ibp->b_flags |= B_READ;
					VOP_STRATEGY(vp, ibp);
					biowait(ibp);
				}
				/*
				 * This block exists, but the next one may not.
				 * If that is the case mark it UNWRITTEN to keep
				 * the accounting straight.
				 */
				/* XXX ondisk32 */
				if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
					((int32_t *)ibp->b_data)[indirs[i].in_off] =
						UNWRITTEN;
				/* XXX ondisk32 */
				idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
#ifdef DEBUG
				if (vp == fs->lfs_ivnode) {
					LFS_ENTER_LOG("balloc", __FILE__,
					    __LINE__, indirs[i].in_lbn,
					    ibp->b_flags, curproc->p_pid);
				}
#endif
				if ((error = VOP_BWRITE(ibp)))
					return error;
			}
		}
	}


	/*
	 * Get the existing block from the cache, if requested.
	 */
	frags = fsbtofrags(fs, bb);
	if (bpp)
		*bpp = bp = getblk(vp, lbn, blksize(fs, ip, lbn), 0, 0);

	/*
	 * Do accounting on blocks that represent pages.
	 */
	if (!bpp)
		lfs_register_block(vp, lbn);

	/*
	 * The block we are writing may be a brand new block
	 * in which case we need to do accounting.
	 *
	 * We can tell a truly new block because ufs_bmaparray will say
	 * it is UNASSIGNED.  Once we allocate it we will assign it the
	 * disk address UNWRITTEN.
	 */
	if (daddr == UNASSIGNED) {
		if (bpp) {
			if (ap->a_flags & B_CLRBUF)
				clrbuf(bp);

			/* Note the new address */
			bp->b_blkno = UNWRITTEN;
		}

		/*
		 * Record UNWRITTEN in whichever pointer (direct, top-level
		 * indirect, or within an indirect block) maps this lbn.
		 */
		switch (num) {
		    case 0:
			ip->i_ffs1_db[lbn] = UNWRITTEN;
			break;
		    case 1:
			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
			break;
		    default:
			idp = &indirs[num - 1];
			if (bread(vp, idp->in_lbn, fs->lfs_bsize, NOCRED,
				  &ibp))
				panic("lfs_balloc: bread bno %lld",
				    (long long)idp->in_lbn);
			/* XXX ondisk32 */
			((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
#ifdef DEBUG
			if (vp == fs->lfs_ivnode) {
				LFS_ENTER_LOG("balloc", __FILE__,
				    __LINE__, idp->in_lbn,
				    ibp->b_flags, curproc->p_pid);
			}
#endif
			VOP_BWRITE(ibp);
		}
	} else if (bpp && !(bp->b_flags & (B_DONE|B_DELWRI))) {
		/*
		 * Not a brand new block, also not in the cache;
		 * read it in from disk.
		 */
		if (iosize == fs->lfs_bsize)
			/* Optimization: I/O is unnecessary. */
			bp->b_blkno = daddr;
		else {
			/*
			 * We need to read the block to preserve the
			 * existing bytes.
			 */
			bp->b_blkno = daddr;
			bp->b_flags |= B_READ;
			VOP_STRATEGY(vp, bp);
			return (biowait(bp));
		}
	}

	return (0);
}
393
/*
 * Extend a fragment at logical block lbn from osize to nsize bytes,
 * adjusting block accounting (lfs_bfree, lfs_avail, effnblks, quota).
 * If bpp is non-NULL the (re-sized) buffer is read in and returned
 * through it; if bpp is NULL only the accounting is performed.
 * Returns 0 on success or an errno value.
 */
/* VOP_BWRITE 1 time */
int
lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf **bpp, struct ucred *cred)
{
	struct inode *ip;
	struct lfs *fs;
	long bb;		/* fs blocks added by the extension */
	int error;
	extern long locked_queue_bytes;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	bb = (long)fragstofsb(fs, numfrags(fs, nsize - osize));
	error = 0;

	ASSERT_DUNNO_SEGLOCK(fs);

	/*
	 * Get the seglock so we don't enlarge blocks while a segment
	 * is being written.  If we're called with bpp==NULL, though,
	 * we are only pretending to change a buffer, so we don't have to
	 * lock.
	 */
    top:
	if (bpp) {
		lockmgr(&fs->lfs_fraglock, LK_SHARED, 0);
		LFS_DEBUG_COUNTLOCKED("frag");
	}

	if (!ISSPACE(fs, bb, cred)) {
		error = ENOSPC;
		goto out;
	}

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it.  UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, NOCRED, bpp))) {
		brelse(*bpp);
		goto out;
	}
#ifdef QUOTA
	if ((error = chkdq(ip, bb, cred, 0))) {
		if (bpp)
			brelse(*bpp);
		goto out;
	}
#endif
	/*
	 * Adjust accounting for lfs_avail.  If there's not enough room,
	 * we will have to wait for the cleaner, which we can't do while
	 * holding a block busy or while holding the seglock.  In that case,
	 * release both and start over after waiting.
	 */

	if (bpp && ((*bpp)->b_flags & B_DELWRI)) {
		if (!lfs_fits(fs, bb)) {
			/* Undo what we've done so far, wait, retry from top. */
			if (bpp)
				brelse(*bpp);
#ifdef QUOTA
			chkdq(ip, -bb, cred, 0);
#endif
			lockmgr(&fs->lfs_fraglock, LK_RELEASE, 0);
			lfs_availwait(fs, bb);
			goto top;
		}
		fs->lfs_avail -= bb;
	}

	/* Commit the accounting: the extension can no longer fail. */
	simple_lock(&fs->lfs_interlock);
	fs->lfs_bfree -= bb;
	simple_unlock(&fs->lfs_interlock);
	ip->i_lfs_effnblks += bb;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;

	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		allocbuf(*bpp, nsize, 1);

		/* Adjust locked-list accounting */
		if (((*bpp)->b_flags & (B_LOCKED | B_CALL)) == B_LOCKED) {
			simple_lock(&lfs_subsys_lock);
			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
			simple_unlock(&lfs_subsys_lock);
		}

		/* Zero the newly-added region so stale data never leaks. */
		bzero((char *)((*bpp)->b_data) + osize, (u_int)(nsize - osize));
	}

    out:
	if (bpp) {
		lockmgr(&fs->lfs_fraglock, LK_RELEASE, 0);
	}
	return (error);
}
493
494 static __inline unsigned int
495 lfs_blist_hash(struct lfs *fs, struct inode *ip, daddr_t lbn)
496 {
497 return ((intptr_t)fs ^ ip->i_number ^ lbn) & (lfs_blist_hw - 1);
498 }
499
500 #define HASH_HEADER(ip, hash) &(lfs_blist[hash])
501
502 /*
503 * Record this lbn as being "write pending". We used to have this information
504 * on the buffer headers, but since pages don't have buffer headers we
505 * record it here instead.
506 */
/*
 * Record this lbn as being "write pending".  We used to have this information
 * on the buffer headers, but since pages don't have buffer headers we
 * record it here instead.
 */
void
lfs_register_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;
	unsigned int hash, depth;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/* If no space, wait for the cleaner */
	lfs_availwait(fs, btofsb(fs, 1 << fs->lfs_bshift));

	/*
	 * Walk the hash chain; if this (fs, vp, lbn) is already
	 * registered there is nothing to do.
	 */
	hash = lfs_blist_hash(fs, ip, lbn);
	depth = 0;
	LIST_FOREACH(lbp, HASH_HEADER(ip, hash), entry) {
		if (lbp->lbn == lbn && lbp->vp == vp && lbp->fs == fs)
			return;
		++depth;
	}

	/* Not found: allocate a new entry and insert it at the chain head. */
	lbp = (struct lbnentry *)pool_get(&lfs_lbnentry_pool, PR_WAITOK);
	lbp->lbn = lbn;
	lbp->vp = vp;
	lbp->fs = fs;
	/* Track chain-depth / population statistics for debugging. */
	if (depth > lfs_blist_maxdepth)
		lfs_blist_maxdepth = depth;
	++lfs_blist_total;
	LIST_INSERT_HEAD(HASH_HEADER(ip, hash), lbp, entry);

	/* Reserve space for the eventual write of this block. */
	simple_lock(&fs->lfs_interlock);
	fs->lfs_favail += btofsb(fs, (1 << fs->lfs_bshift));
	++locked_fakequeue_count;
	simple_unlock(&fs->lfs_interlock);
}
550
/*
 * Unhash and free a "write pending" record, undoing the space
 * reservation and statistics made by lfs_register_block().
 * Caller has already located lbp on its hash chain.
 */
static void
lfs_do_deregister(struct lfs *fs, struct inode *ip, struct lbnentry *lbp)
{
	ASSERT_MAYBE_SEGLOCK(fs);

	LIST_REMOVE(lbp, entry);
	--lfs_blist_total;
	pool_put(&lfs_lbnentry_pool, lbp);
	/* Lock order: lfs_interlock is taken before lfs_subsys_lock. */
	simple_lock(&fs->lfs_interlock);
	/* Guard against underflow of the reservation counters. */
	if (fs->lfs_favail > btofsb(fs, (1 << fs->lfs_bshift)))
		fs->lfs_favail -= btofsb(fs, (1 << fs->lfs_bshift));
	simple_lock(&lfs_subsys_lock);
	if (locked_fakequeue_count > 0)
		--locked_fakequeue_count;
	simple_unlock(&lfs_subsys_lock);
	simple_unlock(&fs->lfs_interlock);
}
568
569 void
570 lfs_deregister_block(struct vnode *vp, daddr_t lbn)
571 {
572 struct lfs *fs;
573 struct inode *ip;
574 struct lbnentry *lbp;
575 unsigned int hash;
576
577 ip = VTOI(vp);
578
579 /* Don't count metadata */
580 if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
581 return;
582
583 fs = ip->i_lfs;
584 hash = lfs_blist_hash(fs, ip, lbn);
585 LIST_FOREACH(lbp, HASH_HEADER(vp, hash), entry) {
586 if (lbp->lbn == lbn && lbp->vp == vp && lbp->fs == fs)
587 break;
588 }
589 if (lbp == NULL)
590 return;
591
592 lfs_do_deregister(fs, ip, lbp);
593 }
594