/*	$NetBSD: lfs_balloc.c,v 1.52 2005/04/01 21:59:46 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_balloc.c	8.4 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_balloc.c,v 1.52 2005/04/01 21:59:46 perseant Exp $");

#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/trace.h>
#include <sys/malloc.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

int lfs_fragextend(struct vnode *, int, int, daddr_t, struct buf **, struct ucred *);

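/* Count of blocks currently registered as write-pending by lfs_register_block(). */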
u_int64_t locked_fakequeue_count;

/*
 * Allocate a block, and do inode and filesystem block accounting for it
 * and for any indirect blocks that may need to be created in order for
 * this block to be created.
 *
 * Blocks which have never been accounted for (i.e., which "do not exist")
 * have disk address 0, which is translated by ufs_bmap to the special value
 * UNASSIGNED == -1, as in the historical UFS.
 *
 * Blocks which have been accounted for but which have not yet been written
 * to disk are given the new special disk address UNWRITTEN == -2, so that
 * they can be differentiated from completely new blocks.
 */
/* VOP_BWRITE NIADDR+2 times */
int
lfs_balloc(void *v)
{
	struct vop_balloc_args /* {
		struct vnode *a_vp;
		off_t a_startoffset;
		int a_size;
		struct ucred *a_cred;
		int a_flags;
		struct buf *a_bpp;
	} */ *ap = v;
	struct vnode *vp;
	int offset;
	u_long iosize;
	daddr_t daddr, idaddr;
	struct buf *ibp, *bp, **bpp;
	struct inode *ip;
	struct lfs *fs;
	struct indir indirs[NIADDR+2], *idp;
	daddr_t lbn, lastblock;
	int bb, bcount;
	int error, frags, i, nsize, osize, num;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_lfs;
	offset = blkoff(fs, ap->a_startoffset);
	iosize = ap->a_size;
	KASSERT(iosize <= fs->lfs_bsize);
	lbn = lblkno(fs, ap->a_startoffset);
	/* (void)lfs_check(vp, lbn, 0); */
	bpp = ap->a_bpp;

	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * Three cases: it's a block beyond the end of file, it's a block in
	 * the file that may or may not have been assigned a disk address, or
	 * we're writing an entire block.
	 *
	 * Note, if the daddr is UNWRITTEN, the block already exists in
	 * the cache (it was read or written earlier).  If so, make sure
	 * we don't count it as a new block or zero out its contents.  If
	 * it does not exist yet, make sure we allocate any necessary
	 * indirect blocks.
	 *
	 * If we are writing a block beyond the end of the file, we need to
	 * check if the old last block was a fragment.  If it was, we need
	 * to rewrite it.
	 */

	if (bpp)
		*bpp = NULL;

	/* Check for block beyond end of file and fragment extension needed. */
	lastblock = lblkno(fs, ip->i_size);
	if (lastblock < NDADDR && lastblock < lbn) {
		osize = blksize(fs, ip, lastblock);
		if (osize < fs->lfs_bsize && osize > 0) {
			if ((error = lfs_fragextend(vp, osize, fs->lfs_bsize,
						    lastblock,
						    (bpp ? &bp : NULL),
						    ap->a_cred)))
				return (error);
			ip->i_ffs1_size = ip->i_size =
			    (lastblock + 1) * fs->lfs_bsize;
			uvm_vnp_setsize(vp, ip->i_size);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp)
				(void) VOP_BWRITE(bp);
		}
	}

	/*
	 * If the block we are writing is a direct block, it's the last
	 * block in the file, and offset + iosize is less than a full
	 * block, we can write one or more fragments.  There are two cases:
	 * the block is brand new and we should allocate it the correct
	 * size, or it already exists and contains some fragments and
	 * we may need to extend it.
	 */
	if (lbn < NDADDR && lblkno(fs, ip->i_size) <= lbn) {
		osize = blksize(fs, ip, lbn);
		nsize = fragroundup(fs, offset + iosize);
		if (lblktosize(fs, lbn) >= ip->i_size) {
			/* Brand new block or fragment */
			frags = numfrags(fs, nsize);
			bb = fragstofsb(fs, frags);
			if (!ISSPACE(fs, bb, ap->a_cred))
				return ENOSPC;
			if (bpp) {
				*ap->a_bpp = bp = getblk(vp, lbn, nsize, 0, 0);
				bp->b_blkno = UNWRITTEN;
				if (ap->a_flags & B_CLRBUF)
					clrbuf(bp);
			}
			ip->i_lfs_effnblks += bb;
			simple_lock(&fs->lfs_interlock);
			ip->i_lfs->lfs_bfree -= bb;
			simple_unlock(&fs->lfs_interlock);
			ip->i_ffs1_db[lbn] = UNWRITTEN;
		} else {
			if (nsize <= osize) {
				/* No need to extend */
				if (bpp && (error = bread(vp, lbn, osize, NOCRED, &bp)))
					return error;
			} else {
				/* Extend existing block */
				if ((error =
				     lfs_fragextend(vp, osize, nsize, lbn,
						    (bpp ? &bp : NULL),
						    ap->a_cred)))
					return error;
			}
			if (bpp)
				*bpp = bp;
		}
		return 0;
	}

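	/*
	 * Look up the block's current disk address and the chain of
	 * indirect blocks (if any) leading to it.
	 */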
	error = ufs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL, NULL);
	if (error)
		return (error);

	daddr = (daddr_t)((int32_t)daddr); /* XXX ondisk32 */
	KASSERT(daddr <= LFS_MAX_DADDR);

	/*
	 * Do all of the block accounting at once, so we can gracefully fail
	 * *before* we start assigning blocks.
	 */
	bb = VFSTOUFS(vp->v_mount)->um_seqinc;
	bcount = 0;
	if (daddr == UNASSIGNED) {
		bcount = bb;
	}
	for (i = 1; i < num; ++i) {
		if (!indirs[i].in_exists) {
			bcount += bb;
		}
	}
	if (ISSPACE(fs, bcount, ap->a_cred)) {
		simple_lock(&fs->lfs_interlock);
		ip->i_lfs->lfs_bfree -= bcount;
		simple_unlock(&fs->lfs_interlock);
		ip->i_lfs_effnblks += bcount;
	} else {
		return ENOSPC;
	}

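	/*
	 * If the block has never existed, make sure the path of indirect
	 * blocks leading to it exists, marking any newly accounted blocks
	 * UNWRITTEN.
	 */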
	if (daddr == UNASSIGNED) {
		if (num > 0 && ip->i_ffs1_ib[indirs[0].in_off] == 0) {
			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
		}

		/*
		 * Create new indirect blocks if necessary
		 */
		if (num > 1) {
			idaddr = ip->i_ffs1_ib[indirs[0].in_off];
			for (i = 1; i < num; ++i) {
				ibp = getblk(vp, indirs[i].in_lbn,
				    fs->lfs_bsize, 0, 0);
				if (!indirs[i].in_exists) {
					clrbuf(ibp);
					ibp->b_blkno = UNWRITTEN;
				} else if (!(ibp->b_flags & (B_DELWRI | B_DONE))) {
					ibp->b_blkno = fsbtodb(fs, idaddr);
					ibp->b_flags |= B_READ;
					VOP_STRATEGY(vp, ibp);
					biowait(ibp);
				}
				/*
				 * This block exists, but the next one may not.
				 * If that is the case, mark it UNWRITTEN to
				 * keep the accounting straight.
				 */
				/* XXX ondisk32 */
				if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
					((int32_t *)ibp->b_data)[indirs[i].in_off] =
						UNWRITTEN;
				/* XXX ondisk32 */
				idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
#ifdef DEBUG
				if (vp == fs->lfs_ivnode) {
					LFS_ENTER_LOG("balloc", __FILE__,
					    __LINE__, indirs[i].in_lbn,
					    ibp->b_flags, curproc->p_pid);
				}
#endif
				if ((error = VOP_BWRITE(ibp)))
					return error;
			}
		}
	}

	/*
	 * Get the existing block from the cache, if requested.
	 */
	frags = fsbtofrags(fs, bb);
	if (bpp)
		*bpp = bp = getblk(vp, lbn, blksize(fs, ip, lbn), 0, 0);

	/*
	 * Do accounting on blocks that represent pages.
	 */
	if (!bpp)
		lfs_register_block(vp, lbn);

	/*
	 * The block we are writing may be a brand new block
	 * in which case we need to do accounting.
	 *
	 * We can tell a truly new block because ufs_bmaparray will say
	 * it is UNASSIGNED.  Once we allocate it we will assign it the
	 * disk address UNWRITTEN.
	 */
	if (daddr == UNASSIGNED) {
		if (bpp) {
			if (ap->a_flags & B_CLRBUF)
				clrbuf(bp);

			/* Note the new address */
			bp->b_blkno = UNWRITTEN;
		}

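		/* Record UNWRITTEN in the inode or in the parent indirect block. */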
		switch (num) {
		    case 0:
			ip->i_ffs1_db[lbn] = UNWRITTEN;
			break;
		    case 1:
			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
			break;
		    default:
			idp = &indirs[num - 1];
			if (bread(vp, idp->in_lbn, fs->lfs_bsize, NOCRED,
			    &ibp))
				panic("lfs_balloc: bread bno %lld",
				    (long long)idp->in_lbn);
			/* XXX ondisk32 */
			((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
#ifdef DEBUG
			if (vp == fs->lfs_ivnode) {
				LFS_ENTER_LOG("balloc", __FILE__,
				    __LINE__, idp->in_lbn,
				    ibp->b_flags, curproc->p_pid);
			}
#endif
			VOP_BWRITE(ibp);
		}
	} else if (bpp && !(bp->b_flags & (B_DONE|B_DELWRI))) {
		/*
		 * Not a brand new block; its contents are not valid in
		 * the cache, so read it in from disk.
		 */
		if (iosize == fs->lfs_bsize)
			/* Optimization: I/O is unnecessary. */
			bp->b_blkno = daddr;
		else {
			/*
			 * We need to read the block to preserve the
			 * existing bytes.
			 */
			bp->b_blkno = daddr;
			bp->b_flags |= B_READ;
			VOP_STRATEGY(vp, bp);
			return (biowait(bp));
		}
	}

	return (0);
}

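/*
 * Extend the fragment at lbn from osize to nsize bytes, adjusting the
 * block and quota accounting.  If bpp is not NULL, the enlarged,
 * zero-extended buffer is returned through it.
 */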
/* VOP_BWRITE 1 time */
int
lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn,
    struct buf **bpp, struct ucred *cred)
{
	struct inode *ip;
	struct lfs *fs;
	long bb;
	int error;
	extern long locked_queue_bytes;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	bb = (long)fragstofsb(fs, numfrags(fs, nsize - osize));
	error = 0;

	ASSERT_DUNNO_SEGLOCK(fs);

	/*
	 * Take the fragment lock (lfs_fraglock, shared) so we don't enlarge
	 * blocks while a segment is being written.  If we're called with
	 * bpp==NULL, though, we are only pretending to change a buffer, so
	 * we don't have to lock.
	 */
    top:
	if (bpp) {
		lockmgr(&fs->lfs_fraglock, LK_SHARED, 0);
		LFS_DEBUG_COUNTLOCKED("frag");
	}

	if (!ISSPACE(fs, bb, cred)) {
		error = ENOSPC;
		goto out;
	}

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it.  UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, NOCRED, bpp))) {
		brelse(*bpp);
		goto out;
	}
#ifdef QUOTA
	if ((error = chkdq(ip, bb, cred, 0))) {
		if (bpp)
			brelse(*bpp);
		goto out;
	}
#endif
	/*
	 * Adjust accounting for lfs_avail.  If there's not enough room,
	 * we will have to wait for the cleaner, which we can't do while
	 * holding a block busy or while holding the seglock.  In that case,
	 * release both and start over after waiting.
	 */
	if (bpp && ((*bpp)->b_flags & B_DELWRI)) {
		if (!lfs_fits(fs, bb)) {
			if (bpp)
				brelse(*bpp);
#ifdef QUOTA
			chkdq(ip, -bb, cred, 0);
#endif
			lockmgr(&fs->lfs_fraglock, LK_RELEASE, 0);
			lfs_availwait(fs, bb);
			goto top;
		}
		fs->lfs_avail -= bb;
	}

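	/* Commit the block accounting for the enlarged fragment. */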
	simple_lock(&fs->lfs_interlock);
	fs->lfs_bfree -= bb;
	simple_unlock(&fs->lfs_interlock);
	ip->i_lfs_effnblks += bb;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;

	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		allocbuf(*bpp, nsize, 1);

		/* Adjust locked-list accounting */
		if (((*bpp)->b_flags & (B_LOCKED | B_CALL)) == B_LOCKED) {
			simple_lock(&lfs_subsys_lock);
			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
			simple_unlock(&lfs_subsys_lock);
		}

		bzero((char *)((*bpp)->b_data) + osize, (u_int)(nsize - osize));
	}

    out:
	if (bpp) {
		lockmgr(&fs->lfs_fraglock, LK_RELEASE, 0);
	}
	return (error);
}

/*
 * Record this lbn as being "write pending".  We used to have this
 * information on the buffer headers, but since pages don't have buffer
 * headers we record it here instead.
 */
void
lfs_register_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;
	int hash;

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG ||
	    VTOI(vp)->i_number == LFS_IFILE_INUM)
		return;

	ip = VTOI(vp);
	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/* If no space, wait for the cleaner */
	lfs_availwait(fs, btofsb(fs, 1 << fs->lfs_bshift));

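	/* If this block is already registered, there is nothing more to do. */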
	hash = lbn % LFS_BLIST_HASH_WIDTH;
	LIST_FOREACH(lbp, &(ip->i_lfs_blist[hash]), entry) {
		if (lbp->lbn == lbn)
			return;
	}

	lbp = (struct lbnentry *)pool_get(&lfs_lbnentry_pool, PR_WAITOK);
	lbp->lbn = lbn;
	LIST_INSERT_HEAD(&(ip->i_lfs_blist[hash]), lbp, entry);

	simple_lock(&fs->lfs_interlock);
	fs->lfs_favail += btofsb(fs, (1 << fs->lfs_bshift));
	++locked_fakequeue_count;
	simple_unlock(&fs->lfs_interlock);
}

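/*
 * Remove a registered entry, undoing the accounting done in
 * lfs_register_block().
 */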
static void
lfs_do_deregister(struct lfs *fs, struct lbnentry *lbp)
{
	ASSERT_MAYBE_SEGLOCK(fs);

	LIST_REMOVE(lbp, entry);
	pool_put(&lfs_lbnentry_pool, lbp);
	simple_lock(&fs->lfs_interlock);
	if (fs->lfs_favail > btofsb(fs, (1 << fs->lfs_bshift)))
		fs->lfs_favail -= btofsb(fs, (1 << fs->lfs_bshift));
	simple_lock(&lfs_subsys_lock);
	if (locked_fakequeue_count > 0)
		--locked_fakequeue_count;
	simple_unlock(&lfs_subsys_lock);
	simple_unlock(&fs->lfs_interlock);
}

void
lfs_deregister_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;
	int hash;

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG ||
	    VTOI(vp)->i_number == LFS_IFILE_INUM)
		return;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	hash = lbn % LFS_BLIST_HASH_WIDTH;
	LIST_FOREACH(lbp, &(ip->i_lfs_blist[hash]), entry) {
		if (lbp->lbn == lbn)
			break;
	}
	if (lbp == NULL)
		return;

	lfs_do_deregister(fs, lbp);
}

void
lfs_deregister_all(struct vnode *vp)
{
	struct lbnentry *lbp;
	struct lfs *fs;
	struct inode *ip;
	int i;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	for (i = 0; i < LFS_BLIST_HASH_WIDTH; i++)
		while ((lbp = LIST_FIRST(&(ip->i_lfs_blist[i]))) != NULL)
			lfs_do_deregister(fs, lbp);
}