/*	$NetBSD: lfs_balloc.c,v 1.88 2015/10/10 22:33:31 dholland Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_balloc.c	8.4 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_balloc.c,v 1.88 2015/10/10 22:33:31 dholland Exp $");

#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/tree.h>
#include <sys/trace.h>
#include <sys/kauth.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/lfs/ulfs_quotacommon.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_extern.h>
#include <ufs/lfs/lfs_kernel.h>

#include <uvm/uvm.h>

int lfs_fragextend(struct vnode *, int, int, daddr_t, struct buf **, kauth_cred_t);

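/*
 * Count of blocks registered as write-pending via lfs_register_block()
 * below; adjusted under lfs_lock and dropped again in lfs_do_deregister().
 */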
u_int64_t locked_fakequeue_count;

/*
 * Allocate a block, and do inode and filesystem block accounting for it
 * and for any indirect blocks that may need to be created in order for
 * this block to be created.
 *
 * Blocks which have never been accounted for (i.e., which "do not exist")
 * have disk address 0, which is translated by ulfs_bmap to the special value
 * UNASSIGNED == -1, as in the historical ULFS.
 *
 * Blocks which have been accounted for but which have not yet been written
 * to disk are given the new special disk address UNWRITTEN == -2, so that
 * they can be differentiated from completely new blocks.
 */
/* VOP_BWRITE ULFS_NIADDR+2 times */
int
lfs_balloc(struct vnode *vp, off_t startoffset, int iosize, kauth_cred_t cred,
    int flags, struct buf **bpp)
{
	int offset;
	daddr_t daddr, idaddr;
	struct buf *ibp, *bp;
	struct inode *ip;
	struct lfs *fs;
	struct indir indirs[ULFS_NIADDR+2], *idp;
	daddr_t lbn, lastblock;
	int bcount;
	int error, frags, i, nsize, osize, num;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	offset = lfs_blkoff(fs, startoffset);
	KASSERT(iosize <= lfs_sb_getbsize(fs));
	lbn = lfs_lblkno(fs, startoffset);
	/* (void)lfs_check(vp, lbn, 0); */

	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * Three cases: it's a block beyond the end of file, it's a block in
	 * the file that may or may not have been assigned a disk address, or
	 * we're writing an entire block.
	 *
	 * Note: if the daddr is UNWRITTEN, the block already exists in
	 * the cache (it was read or written earlier); if so, make sure
	 * we don't count it as a new block or zero out its contents.  If
	 * it does not exist, make sure we allocate any necessary indirect
	 * blocks.
	 *
	 * If we are writing a block beyond the end of the file, we need to
	 * check if the old last block was a fragment.  If it was, we need
	 * to rewrite it.
	 */

	if (bpp)
		*bpp = NULL;

	/* Check for block beyond end of file and fragment extension needed. */
	lastblock = lfs_lblkno(fs, ip->i_size);
	if (lastblock < ULFS_NDADDR && lastblock < lbn) {
		osize = lfs_blksize(fs, ip, lastblock);
		if (osize < lfs_sb_getbsize(fs) && osize > 0) {
			if ((error = lfs_fragextend(vp, osize, lfs_sb_getbsize(fs),
						    lastblock,
						    (bpp ? &bp : NULL), cred)))
				return (error);
			ip->i_size = (lastblock + 1) * lfs_sb_getbsize(fs);
			lfs_dino_setsize(fs, ip->i_din, ip->i_size);
			uvm_vnp_setsize(vp, ip->i_size);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp)
				(void) VOP_BWRITE(bp->b_vp, bp);
		}
	}

	/*
	 * If the block we are writing is a direct block, it's the last
	 * block in the file, and offset + iosize is less than a full
	 * block, we can write one or more fragments.  There are two cases:
	 * the block is brand new and we should allocate it at the correct
	 * size, or it already exists and contains some fragments and we
	 * may need to extend it.
	 */
	if (lbn < ULFS_NDADDR && lfs_lblkno(fs, ip->i_size) <= lbn) {
		osize = lfs_blksize(fs, ip, lbn);
		nsize = lfs_fragroundup(fs, offset + iosize);
		if (lfs_lblktosize(fs, lbn) >= ip->i_size) {
			/* Brand new block or fragment */
			frags = lfs_numfrags(fs, nsize);
			if (!ISSPACE(fs, frags, cred))
				return ENOSPC;
			if (bpp) {
				*bpp = bp = getblk(vp, lbn, nsize, 0, 0);
				bp->b_blkno = UNWRITTEN;
				if (flags & B_CLRBUF)
					clrbuf(bp);
			}
			ip->i_lfs_effnblks += frags;
			mutex_enter(&lfs_lock);
			lfs_sb_subbfree(fs, frags);
			mutex_exit(&lfs_lock);
			lfs_dino_setdb(fs, ip->i_din, lbn, UNWRITTEN);
		} else {
			if (nsize <= osize) {
				/* No need to extend */
				if (bpp && (error = bread(vp, lbn, osize,
				    0, &bp)))
					return error;
			} else {
				/* Extend existing block */
				if ((error =
				     lfs_fragextend(vp, osize, nsize, lbn,
					 (bpp ? &bp : NULL), cred)))
					return error;
			}
			if (bpp)
				*bpp = bp;
		}
		return 0;
	}

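	/*
	 * Look up the current on-disk address for this block, along with
	 * the chain of indirect blocks (indirs[]) that leads to it.
	 */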
	error = ulfs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL, NULL);
	if (error)
		return (error);

	KASSERT(daddr <= LFS_MAX_DADDR(fs));

	/*
	 * Do byte accounting all at once, so we can gracefully fail *before*
	 * we start assigning blocks.
	 */
	frags = fs->um_seqinc;
	bcount = 0;
	if (daddr == UNASSIGNED) {
		bcount = frags;
	}
	for (i = 1; i < num; ++i) {
		if (!indirs[i].in_exists) {
			bcount += frags;
		}
	}
	if (ISSPACE(fs, bcount, cred)) {
		mutex_enter(&lfs_lock);
		lfs_sb_subbfree(fs, bcount);
		mutex_exit(&lfs_lock);
		ip->i_lfs_effnblks += bcount;
	} else {
		return ENOSPC;
	}

	if (daddr == UNASSIGNED) {
		if (num > 0 && lfs_dino_getib(fs, ip->i_din, indirs[0].in_off) == 0) {
			lfs_dino_setib(fs, ip->i_din, indirs[0].in_off, UNWRITTEN);
		}

		/*
		 * Create new indirect blocks if necessary
		 */
		if (num > 1) {
			idaddr = lfs_dino_getib(fs, ip->i_din, indirs[0].in_off);
			for (i = 1; i < num; ++i) {
				ibp = getblk(vp, indirs[i].in_lbn,
				    lfs_sb_getbsize(fs), 0, 0);
				if (!indirs[i].in_exists) {
					clrbuf(ibp);
					ibp->b_blkno = UNWRITTEN;
				} else if (!(ibp->b_oflags & (BO_DELWRI | BO_DONE))) {
					ibp->b_blkno = LFS_FSBTODB(fs, idaddr);
					ibp->b_flags |= B_READ;
					VOP_STRATEGY(vp, ibp);
					biowait(ibp);
				}
				/*
				 * This block exists, but the next one may not.
				 * If that is the case, mark it UNWRITTEN to
				 * keep the accounting straight.
				 */
				if (lfs_iblock_get(fs, ibp->b_data, indirs[i].in_off) == 0)
					lfs_iblock_set(fs, ibp->b_data, indirs[i].in_off,
					    UNWRITTEN);
				idaddr = lfs_iblock_get(fs, ibp->b_data, indirs[i].in_off);
#ifdef DEBUG
				if (vp == fs->lfs_ivnode) {
					LFS_ENTER_LOG("balloc", __FILE__,
					    __LINE__, indirs[i].in_lbn,
					    ibp->b_flags, curproc->p_pid);
				}
#endif
				if ((error = VOP_BWRITE(ibp->b_vp, ibp)))
					return error;
			}
		}
	}

	/*
	 * Get the existing block from the cache, if requested.
	 */
	if (bpp)
		*bpp = bp = getblk(vp, lbn, lfs_blksize(fs, ip, lbn), 0, 0);

	/*
	 * Do accounting on blocks that represent pages: if no buffer
	 * was requested, record the block as write-pending (see
	 * lfs_register_block() below).
	 */
	if (!bpp)
		lfs_register_block(vp, lbn);

	/*
	 * The block we are writing may be a brand new block, in which
	 * case we need to do accounting.
	 *
	 * We can tell a truly new block because ulfs_bmaparray will say
	 * it is UNASSIGNED.  Once we allocate it, we will assign it the
	 * disk address UNWRITTEN.
	 */
	if (daddr == UNASSIGNED) {
		if (bpp) {
			if (flags & B_CLRBUF)
				clrbuf(bp);

			/* Note the new address */
			bp->b_blkno = UNWRITTEN;
		}

		switch (num) {
		case 0:
			lfs_dino_setdb(fs, ip->i_din, lbn, UNWRITTEN);
			break;
		case 1:
			lfs_dino_setib(fs, ip->i_din, indirs[0].in_off, UNWRITTEN);
			break;
		default:
			idp = &indirs[num - 1];
			if (bread(vp, idp->in_lbn, lfs_sb_getbsize(fs),
			    B_MODIFY, &ibp))
				panic("lfs_balloc: bread bno %lld",
				    (long long)idp->in_lbn);
			lfs_iblock_set(fs, ibp->b_data, idp->in_off, UNWRITTEN);
#ifdef DEBUG
			if (vp == fs->lfs_ivnode) {
				LFS_ENTER_LOG("balloc", __FILE__,
				    __LINE__, idp->in_lbn,
				    ibp->b_flags, curproc->p_pid);
			}
#endif
			VOP_BWRITE(ibp->b_vp, ibp);
		}
	} else if (bpp && !(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
		/*
		 * Not a brand new block, and its contents are not
		 * yet valid in the cache; read it in from disk.
		 */
		if (iosize == lfs_sb_getbsize(fs))
			/* Optimization: I/O is unnecessary. */
			bp->b_blkno = daddr;
		else {
			/*
			 * We need to read the block to preserve the
			 * existing bytes.
			 */
			bp->b_blkno = daddr;
			bp->b_flags |= B_READ;
			VOP_STRATEGY(vp, bp);
			return (biowait(bp));
		}
	}

	return (0);
}

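/*
 * Extend the last (fragment-sized) block of a file from osize to nsize
 * bytes, adjusting lfs_avail/lfs_bfree and the inode's effective block
 * count, and enlarging the buffer if one was requested via bpp.
 */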
/* VOP_BWRITE 1 time */
int
lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf **bpp,
    kauth_cred_t cred)
{
	struct inode *ip;
	struct lfs *fs;
	long frags;
	int error;
	extern long locked_queue_bytes;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	frags = (long)lfs_numfrags(fs, nsize - osize);
	error = 0;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * Take lfs_fraglock so we don't enlarge blocks while a segment
	 * is being written.  If we're called with bpp == NULL, though,
	 * we are only pretending to change a buffer, so we don't have to
	 * lock.
	 */
top:
	if (bpp) {
		rw_enter(&fs->lfs_fraglock, RW_READER);
		LFS_DEBUG_COUNTLOCKED("frag");
	}

	if (!ISSPACE(fs, frags, cred)) {
		error = ENOSPC;
		goto out;
	}

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it.  UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, 0, bpp))) {
		goto out;
	}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	if ((error = lfs_chkdq(ip, frags, cred, 0))) {
		if (bpp)
			brelse(*bpp, 0);
		goto out;
	}
#endif
	/*
	 * Adjust accounting for lfs_avail.  If there's not enough room,
	 * we will have to wait for the cleaner, which we can't do while
	 * holding a block busy or while holding lfs_fraglock.  In that
	 * case, release both and start over after waiting.
	 */

	if (bpp && ((*bpp)->b_oflags & BO_DELWRI)) {
		if (!lfs_fits(fs, frags)) {
			if (bpp)
				brelse(*bpp, 0);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
			lfs_chkdq(ip, -frags, cred, 0);
#endif
			rw_exit(&fs->lfs_fraglock);
			lfs_availwait(fs, frags);
			goto top;
		}
		lfs_sb_subavail(fs, frags);
	}

	mutex_enter(&lfs_lock);
	lfs_sb_subbfree(fs, frags);
	mutex_exit(&lfs_lock);
	ip->i_lfs_effnblks += frags;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;

	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		allocbuf(*bpp, nsize, 1);

		/* Adjust locked-list accounting */
		if (((*bpp)->b_flags & B_LOCKED) != 0 &&
		    (*bpp)->b_iodone == NULL) {
			mutex_enter(&lfs_lock);
			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
			mutex_exit(&lfs_lock);
		}

		memset((char *)((*bpp)->b_data) + osize, 0, (u_int)(nsize - osize));
	}

out:
	if (bpp) {
		rw_exit(&fs->lfs_fraglock);
	}
	return (error);
}

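/*
 * Comparison function for the per-inode splay tree of write-pending
 * logical block numbers (i_lfs_lbtree): order lbnentry nodes by lbn.
 */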
static inline int
lge(struct lbnentry *a, struct lbnentry *b)
{
	return a->lbn - b->lbn;
}

SPLAY_PROTOTYPE(lfs_splay, lbnentry, entry, lge);

SPLAY_GENERATE(lfs_splay, lbnentry, entry, lge);

/*
 * Record this lbn as being "write pending".  We used to have this information
 * on the buffer headers, but since pages don't have buffer headers we
 * record it here instead.
 */
void
lfs_register_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/* If no space, wait for the cleaner */
	lfs_availwait(fs, lfs_btofsb(fs, 1 << lfs_sb_getbshift(fs)));

	lbp = (struct lbnentry *)pool_get(&lfs_lbnentry_pool, PR_WAITOK);
	lbp->lbn = lbn;
	mutex_enter(&lfs_lock);
	if (SPLAY_INSERT(lfs_splay, &ip->i_lfs_lbtree, lbp) != NULL) {
		mutex_exit(&lfs_lock);
		/* Already there */
		pool_put(&lfs_lbnentry_pool, lbp);
		return;
	}

	++ip->i_lfs_nbtree;
	fs->lfs_favail += lfs_btofsb(fs, (1 << lfs_sb_getbshift(fs)));
	fs->lfs_pages += lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	++locked_fakequeue_count;
	lfs_subsys_pages += lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	mutex_exit(&lfs_lock);
}

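/*
 * Remove a registered lbn entry from the inode's splay tree and undo
 * the accounting done by lfs_register_block(), then free the entry.
 */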
static void
lfs_do_deregister(struct lfs *fs, struct inode *ip, struct lbnentry *lbp)
{
	ASSERT_MAYBE_SEGLOCK(fs);

	mutex_enter(&lfs_lock);
	--ip->i_lfs_nbtree;
	SPLAY_REMOVE(lfs_splay, &ip->i_lfs_lbtree, lbp);
	if (fs->lfs_favail > lfs_btofsb(fs, (1 << lfs_sb_getbshift(fs))))
		fs->lfs_favail -= lfs_btofsb(fs, (1 << lfs_sb_getbshift(fs)));
	fs->lfs_pages -= lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	if (locked_fakequeue_count > 0)
		--locked_fakequeue_count;
	lfs_subsys_pages -= lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	mutex_exit(&lfs_lock);

	pool_put(&lfs_lbnentry_pool, lbp);
}

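/*
 * Drop the write-pending registration for a single lbn, if one exists.
 */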
void
lfs_deregister_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;
	struct lbnentry tmp;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;
	tmp.lbn = lbn;
	lbp = SPLAY_FIND(lfs_splay, &ip->i_lfs_lbtree, &tmp);
	if (lbp == NULL)
		return;

	lfs_do_deregister(fs, ip, lbp);
}

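/*
 * Drop all write-pending registrations for this vnode.
 */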
void
lfs_deregister_all(struct vnode *vp)
{
	struct lbnentry *lbp, *nlbp;
	struct lfs_splay *hd;
	struct lfs *fs;
	struct inode *ip;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	hd = &ip->i_lfs_lbtree;

	for (lbp = SPLAY_MIN(lfs_splay, hd); lbp != NULL; lbp = nlbp) {
		nlbp = SPLAY_NEXT(lfs_splay, hd, lbp);
		lfs_do_deregister(fs, ip, lbp);
	}
}