/*	$NetBSD: lfs_balloc.c,v 1.77 2013/06/18 18:18:58 christos Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_balloc.c	8.4 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_balloc.c,v 1.77 2013/06/18 18:18:58 christos Exp $");

#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/tree.h>
#include <sys/trace.h>
#include <sys/kauth.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/lfs/ulfs_quotacommon.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

int lfs_fragextend(struct vnode *, int, int, daddr_t, struct buf **, kauth_cred_t);

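/*
 * Count of blocks registered as "write pending"; incremented by
 * lfs_register_block() and decremented by lfs_do_deregister() below.
 */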
u_int64_t locked_fakequeue_count;

/*
 * Allocate a block, and do inode and filesystem block accounting for it
 * and for any indirect blocks that may need to be created in order for
 * this block to be created.
 *
 * Blocks which have never been accounted for (i.e., which "do not exist")
 * have disk address 0, which is translated by ulfs_bmap to the special value
 * UNASSIGNED == -1, as in the historical ULFS.
 *
 * Blocks which have been accounted for but which have not yet been written
 * to disk are given the new special disk address UNWRITTEN == -2, so that
 * they can be differentiated from completely new blocks.
 */
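/*
 * Illustrative sketch (not a contract) of the lifetime of one new block as
 * handled here:
 *
 *	disk address 0	never accounted for; ulfs_bmap reports it UNASSIGNED
 *	UNWRITTEN	lfs_balloc has done the accounting; no disk copy yet
 *	real address	assigned when the block is eventually written to disk
 *			(outside this file)
 */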
/* VOP_BWRITE ULFS_NIADDR+2 times */
int
lfs_balloc(struct vnode *vp, off_t startoffset, int iosize, kauth_cred_t cred,
    int flags, struct buf **bpp)
{
	int offset;
	daddr_t daddr, idaddr;
	struct buf *ibp, *bp;
	struct inode *ip;
	struct lfs *fs;
	struct indir indirs[ULFS_NIADDR+2], *idp;
	daddr_t	lbn, lastblock;
	int bcount;
	int error, frags, i, nsize, osize, num;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	offset = lfs_blkoff(fs, startoffset);
	KASSERT(iosize <= fs->lfs_bsize);
	lbn = lfs_lblkno(fs, startoffset);
	/* (void)lfs_check(vp, lbn, 0); */

	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * Three cases: it's a block beyond the end of file, it's a block in
	 * the file that may or may not have been assigned a disk address, or
	 * we're writing an entire block.
	 *
	 * Note, if the daddr is UNWRITTEN, the block already exists in
	 * the cache (it was read or written earlier).  If so, make sure
	 * we don't count it as a new block or zero out its contents.  If
	 * it did not already exist, make sure we allocate any necessary
	 * indirect blocks.
	 *
	 * If we are writing a block beyond the end of the file, we need to
	 * check if the old last block was a fragment.  If it was, we need
	 * to rewrite it.
	 */

	if (bpp)
		*bpp = NULL;

	/* Check for block beyond end of file and fragment extension needed. */
	lastblock = lfs_lblkno(fs, ip->i_size);
	if (lastblock < ULFS_NDADDR && lastblock < lbn) {
		osize = lfs_blksize(fs, ip, lastblock);
		if (osize < fs->lfs_bsize && osize > 0) {
			if ((error = lfs_fragextend(vp, osize, fs->lfs_bsize,
						    lastblock,
						    (bpp ? &bp : NULL), cred)))
				return (error);
			ip->i_ffs1_size = ip->i_size =
			    (lastblock + 1) * fs->lfs_bsize;
			uvm_vnp_setsize(vp, ip->i_size);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp)
				(void) VOP_BWRITE(bp->b_vp, bp);
		}
	}

	/*
	 * If the block we are writing is a direct block, it's the last
	 * block in the file, and offset + iosize is less than a full
	 * block, we can write one or more fragments.  There are two cases:
	 * the block is brand new and we should allocate it at the correct
	 * size, or it already exists, contains some fragments, and we may
	 * need to extend it.
	 */
	if (lbn < ULFS_NDADDR && lfs_lblkno(fs, ip->i_size) <= lbn) {
		osize = lfs_blksize(fs, ip, lbn);
		nsize = lfs_fragroundup(fs, offset + iosize);
		if (lfs_lblktosize(fs, lbn) >= ip->i_size) {
			/* Brand new block or fragment */
			frags = lfs_numfrags(fs, nsize);
			if (!ISSPACE(fs, frags, cred))
				return ENOSPC;
			if (bpp) {
				*bpp = bp = getblk(vp, lbn, nsize, 0, 0);
				bp->b_blkno = UNWRITTEN;
				if (flags & B_CLRBUF)
					clrbuf(bp);
			}
			ip->i_lfs_effnblks += frags;
			mutex_enter(&lfs_lock);
			fs->lfs_bfree -= frags;
			mutex_exit(&lfs_lock);
			ip->i_ffs1_db[lbn] = UNWRITTEN;
		} else {
			if (nsize <= osize) {
				/* No need to extend */
				if (bpp && (error = bread(vp, lbn, osize,
				    NOCRED, 0, &bp)))
					return error;
			} else {
				/* Extend existing block */
				if ((error =
				     lfs_fragextend(vp, osize, nsize, lbn,
						    (bpp ? &bp : NULL), cred)))
					return error;
			}
			if (bpp)
				*bpp = bp;
		}
		return 0;
	}

	error = ulfs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL, NULL);
	if (error)
		return (error);

	daddr = (daddr_t)((int32_t)daddr); /* XXX ondisk32 */
	KASSERT(daddr <= LFS_MAX_DADDR);

	/*
	 * Do byte accounting all at once, so we can gracefully fail *before*
	 * we start assigning blocks.
	 */
	frags = VFSTOULFS(vp->v_mount)->um_seqinc;
	bcount = 0;
	if (daddr == UNASSIGNED) {
		bcount = frags;
	}
	for (i = 1; i < num; ++i) {
		if (!indirs[i].in_exists) {
			bcount += frags;
		}
	}
	if (ISSPACE(fs, bcount, cred)) {
		mutex_enter(&lfs_lock);
		fs->lfs_bfree -= bcount;
		mutex_exit(&lfs_lock);
		ip->i_lfs_effnblks += bcount;
	} else {
		return ENOSPC;
	}

	if (daddr == UNASSIGNED) {
		if (num > 0 && ip->i_ffs1_ib[indirs[0].in_off] == 0) {
			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
		}

		/*
		 * Create new indirect blocks if necessary
		 */
		if (num > 1) {
			idaddr = ip->i_ffs1_ib[indirs[0].in_off];
			for (i = 1; i < num; ++i) {
				ibp = getblk(vp, indirs[i].in_lbn,
				    fs->lfs_bsize, 0, 0);
				if (!indirs[i].in_exists) {
					clrbuf(ibp);
					ibp->b_blkno = UNWRITTEN;
				} else if (!(ibp->b_oflags & (BO_DELWRI | BO_DONE))) {
					ibp->b_blkno = LFS_FSBTODB(fs, idaddr);
					ibp->b_flags |= B_READ;
					VOP_STRATEGY(vp, ibp);
					biowait(ibp);
				}
				/*
				 * This block exists, but the next one may not.
				 * If that is the case, mark it UNWRITTEN to
				 * keep the accounting straight.
				 */
				/* XXX ondisk32 */
				if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
					((int32_t *)ibp->b_data)[indirs[i].in_off] =
						UNWRITTEN;
				/* XXX ondisk32 */
				idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
#ifdef DEBUG
				if (vp == fs->lfs_ivnode) {
					LFS_ENTER_LOG("balloc", __FILE__,
					    __LINE__, indirs[i].in_lbn,
					    ibp->b_flags, curproc->p_pid);
				}
#endif
				if ((error = VOP_BWRITE(ibp->b_vp, ibp)))
					return error;
			}
		}
	}

	/*
	 * Get the existing block from the cache, if requested.
	 */
	if (bpp)
		*bpp = bp = getblk(vp, lbn, lfs_blksize(fs, ip, lbn), 0, 0);

	/*
	 * Do accounting on blocks that represent pages.
	 */
	if (!bpp)
		lfs_register_block(vp, lbn);

	/*
	 * The block we are writing may be a brand new block
	 * in which case we need to do accounting.
	 *
	 * We can tell a truly new block because ulfs_bmaparray will say
	 * it is UNASSIGNED.  Once we allocate it we will assign it the
	 * disk address UNWRITTEN.
	 */
	if (daddr == UNASSIGNED) {
		if (bpp) {
			if (flags & B_CLRBUF)
				clrbuf(bp);

			/* Note the new address */
			bp->b_blkno = UNWRITTEN;
		}

		switch (num) {
		case 0:
			ip->i_ffs1_db[lbn] = UNWRITTEN;
			break;
		case 1:
			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
			break;
		default:
			idp = &indirs[num - 1];
			if (bread(vp, idp->in_lbn, fs->lfs_bsize, NOCRED,
			    B_MODIFY, &ibp))
				panic("lfs_balloc: bread bno %lld",
				    (long long)idp->in_lbn);
			/* XXX ondisk32 */
			((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
#ifdef DEBUG
			if (vp == fs->lfs_ivnode) {
				LFS_ENTER_LOG("balloc", __FILE__,
				    __LINE__, idp->in_lbn,
				    ibp->b_flags, curproc->p_pid);
			}
#endif
			VOP_BWRITE(ibp->b_vp, ibp);
		}
	} else if (bpp && !(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
		/*
		 * Not a brand new block, also not in the cache;
		 * read it in from disk.
		 */
		if (iosize == fs->lfs_bsize)
			/* Optimization: I/O is unnecessary. */
			bp->b_blkno = daddr;
		else {
			/*
			 * We need to read the block to preserve the
			 * existing bytes.
			 */
			bp->b_blkno = daddr;
			bp->b_flags |= B_READ;
			VOP_STRATEGY(vp, bp);
			return (biowait(bp));
		}
	}

	return (0);
}

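/*
 * Extend a fragment (in this file, always the last block of the file) from
 * osize to nsize bytes, adjusting the free-space, quota and buffer
 * accounting to match.  Called from lfs_balloc() above; if bpp is NULL,
 * only the accounting is done and no buffer is returned.
 */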
/* VOP_BWRITE 1 time */
int
lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf **bpp,
    kauth_cred_t cred)
{
	struct inode *ip;
	struct lfs *fs;
	long frags;
	int error;
	extern long locked_queue_bytes;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	frags = (long)lfs_numfrags(fs, nsize - osize);
	error = 0;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * Get the seglock so we don't enlarge blocks while a segment
	 * is being written.  If we're called with bpp==NULL, though,
	 * we are only pretending to change a buffer, so we don't have to
	 * lock.
	 */
    top:
	if (bpp) {
		rw_enter(&fs->lfs_fraglock, RW_READER);
		LFS_DEBUG_COUNTLOCKED("frag");
	}

	if (!ISSPACE(fs, frags, cred)) {
		error = ENOSPC;
		goto out;
	}

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it.  UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, NOCRED, 0, bpp))) {
		goto out;
	}
#ifdef LFS_QUOTA
	if ((error = lfs_chkdq(ip, frags, cred, 0))) {
		if (bpp)
			brelse(*bpp, 0);
		goto out;
	}
#endif
	/*
	 * Adjust accounting for lfs_avail.  If there's not enough room,
	 * we will have to wait for the cleaner, which we can't do while
	 * holding a block busy or while holding the seglock.  In that case,
	 * release both and start over after waiting.
	 */

	if (bpp && ((*bpp)->b_oflags & BO_DELWRI)) {
		if (!lfs_fits(fs, frags)) {
			if (bpp)
				brelse(*bpp, 0);
#ifdef LFS_QUOTA
			lfs_chkdq(ip, -frags, cred, 0);
#endif
			rw_exit(&fs->lfs_fraglock);
			lfs_availwait(fs, frags);
			goto top;
		}
		fs->lfs_avail -= frags;
	}

	mutex_enter(&lfs_lock);
	fs->lfs_bfree -= frags;
	mutex_exit(&lfs_lock);
	ip->i_lfs_effnblks += frags;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;

	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		allocbuf(*bpp, nsize, 1);

		/* Adjust locked-list accounting */
		if (((*bpp)->b_flags & B_LOCKED) != 0 &&
		    (*bpp)->b_iodone == NULL) {
			mutex_enter(&lfs_lock);
			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
			mutex_exit(&lfs_lock);
		}

		memset((char *)((*bpp)->b_data) + osize, 0, (u_int)(nsize - osize));
	}

    out:
	if (bpp) {
		rw_exit(&fs->lfs_fraglock);
	}
	return (error);
}

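/*
 * Comparison function for the per-inode splay tree of write-pending logical
 * block numbers; entries are ordered by their lbn field.  The
 * SPLAY_PROTOTYPE/SPLAY_GENERATE invocations below instantiate the tree
 * operations using it.
 */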
static inline int
lge(struct lbnentry *a, struct lbnentry *b)
{
	return a->lbn - b->lbn;
}

SPLAY_PROTOTYPE(lfs_splay, lbnentry, entry, lge);

SPLAY_GENERATE(lfs_splay, lbnentry, entry, lge);

/*
 * Record this lbn as being "write pending".  We used to have this information
 * on the buffer headers, but since pages don't have buffer headers we
 * record it here instead.
 */
void
lfs_register_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/* If no space, wait for the cleaner */
	lfs_availwait(fs, lfs_btofsb(fs, 1 << fs->lfs_bshift));

	lbp = (struct lbnentry *)pool_get(&lfs_lbnentry_pool, PR_WAITOK);
	lbp->lbn = lbn;
	mutex_enter(&lfs_lock);
	if (SPLAY_INSERT(lfs_splay, &ip->i_lfs_lbtree, lbp) != NULL) {
		mutex_exit(&lfs_lock);
		/* Already there */
		pool_put(&lfs_lbnentry_pool, lbp);
		return;
	}

	++ip->i_lfs_nbtree;
	fs->lfs_favail += lfs_btofsb(fs, (1 << fs->lfs_bshift));
	fs->lfs_pages += fs->lfs_bsize >> PAGE_SHIFT;
	++locked_fakequeue_count;
	lfs_subsys_pages += fs->lfs_bsize >> PAGE_SHIFT;
	mutex_exit(&lfs_lock);
}

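/*
 * Common helper for the deregister routines below: remove one entry from
 * the inode's splay tree and undo the accounting that lfs_register_block()
 * did for it.
 */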
static void
lfs_do_deregister(struct lfs *fs, struct inode *ip, struct lbnentry *lbp)
{
	ASSERT_MAYBE_SEGLOCK(fs);

	mutex_enter(&lfs_lock);
	--ip->i_lfs_nbtree;
	SPLAY_REMOVE(lfs_splay, &ip->i_lfs_lbtree, lbp);
	if (fs->lfs_favail > lfs_btofsb(fs, (1 << fs->lfs_bshift)))
		fs->lfs_favail -= lfs_btofsb(fs, (1 << fs->lfs_bshift));
	fs->lfs_pages -= fs->lfs_bsize >> PAGE_SHIFT;
	if (locked_fakequeue_count > 0)
		--locked_fakequeue_count;
	lfs_subsys_pages -= fs->lfs_bsize >> PAGE_SHIFT;
	mutex_exit(&lfs_lock);

	pool_put(&lfs_lbnentry_pool, lbp);
}

void
lfs_deregister_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;
	struct lbnentry tmp;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;
	tmp.lbn = lbn;
	lbp = SPLAY_FIND(lfs_splay, &ip->i_lfs_lbtree, &tmp);
	if (lbp == NULL)
		return;

	lfs_do_deregister(fs, ip, lbp);
}

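/*
 * Deregister every block still recorded as write-pending for this vnode.
 */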
void
lfs_deregister_all(struct vnode *vp)
{
	struct lbnentry *lbp, *nlbp;
	struct lfs_splay *hd;
	struct lfs *fs;
	struct inode *ip;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	hd = &ip->i_lfs_lbtree;

	for (lbp = SPLAY_MIN(lfs_splay, hd); lbp != NULL; lbp = nlbp) {
		nlbp = SPLAY_NEXT(lfs_splay, hd, lbp);
		lfs_do_deregister(fs, ip, lbp);
	}
}