/*	$NetBSD: ulfs_bmap.c,v 1.7 2015/09/01 06:08:37 dholland Exp $	*/
/*  from NetBSD: ufs_bmap.c,v 1.50 2013/01/22 09:39:18 dholland Exp  */

/*
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_bmap.c	8.8 (Berkeley) 8/11/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_bmap.c,v 1.7 2015/09/01 06:08:37 dholland Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/trace.h>
#include <sys/fstrans.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>
#include <ufs/lfs/ulfs_bswap.h>

static bool
ulfs_issequential(const struct lfs *fs, daddr_t daddr0, daddr_t daddr1)
{

	/* for ulfs, blocks in a hole are not 'contiguous'. */
	if (daddr0 == 0)
		return false;

	return (daddr0 + fs->um_seqinc == daddr1);
}
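
/*
 * The check above treats two addresses as sequential when they are exactly
 * um_seqinc device blocks apart.  Illustrative values: with um_seqinc == 16
 * (e.g. 8 KB file system blocks on 512-byte device blocks), daddr0 == 1000
 * and daddr1 == 1016 are sequential, while 1000 and 1008 are not.
 */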

/*
 * This is used for block pointers in inodes and elsewhere, which can
 * contain the magic value UNWRITTEN, which is -2. This is mishandled
 * by u32 -> u64 promotion unless special-cased.
 *
 * XXX this should be rolled into better inode accessors and go away.
 */
static inline uint64_t
ulfs_fix_unwritten(uint32_t val)
{
	if (val == (uint32_t)UNWRITTEN) {
		return (uint64_t)(int64_t)UNWRITTEN;
	} else {
		return val;
	}
}
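
/*
 * Worked example: UNWRITTEN is -2, stored in a 32-bit block pointer as
 * 0xfffffffe.  A plain uint32_t -> uint64_t promotion would yield
 * 0x00000000fffffffe, which no longer compares equal to the 64-bit
 * UNWRITTEN; the cast chain above sign-extends it to 0xfffffffffffffffe.
 */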


/*
 * Bmap converts the logical block number of a file to its physical block
 * number on the disk. The conversion is done by using the logical block
 * number to index into the array of block pointers described by the dinode.
 */
int
ulfs_bmap(void *v)
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;
	int error;

	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */
	if (ap->a_vpp != NULL)
		*ap->a_vpp = VTOI(ap->a_vp)->i_devvp;
	if (ap->a_bnp == NULL)
		return (0);

	fstrans_start(ap->a_vp->v_mount, FSTRANS_SHARED);
	error = ulfs_bmaparray(ap->a_vp, ap->a_bn, ap->a_bnp, NULL, NULL,
	    ap->a_runp, ulfs_issequential);
	fstrans_done(ap->a_vp->v_mount);
	return error;
}
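
/*
 * Callers normally go through the vnode interface rather than calling
 * ulfs_bmap() directly.  A minimal sketch of such a caller (hypothetical
 * variable names, error handling omitted):
 *
 *	struct vnode *devvp;
 *	daddr_t pbn;
 *	int run;
 *
 *	error = VOP_BMAP(vp, lbn, &devvp, &pbn, &run);
 *
 * On success pbn holds the device block for file block lbn (-1 for a
 * hole), devvp the device vnode, and run the number of following file
 * blocks that are contiguous on disk.
 */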

/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ulfs_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
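/*
 * Example of the numbering scheme (assuming ULFS_NDADDR == 12 direct
 * blocks and 2048 pointers per indirect block): the single indirect
 * block covering data blocks 12..2059 gets logical block number -12;
 * the double indirect block gets -2061, one less than the -2060 of the
 * first single indirect block it points to (which covers data blocks
 * starting at 2060).
 */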

int
ulfs_bmaparray(struct vnode *vp, daddr_t bn, daddr_t *bnp, struct indir *ap,
    int *nump, int *runp, ulfs_issequential_callback_t is_sequential)
{
	struct inode *ip;
	struct buf *bp, *cbp;
	struct ulfsmount *ump;
	struct lfs *fs;
	struct mount *mp;
	struct indir a[ULFS_NIADDR + 1], *xap;
	daddr_t daddr;
	daddr_t metalbn;
	int error, maxrun = 0, num;

	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = ip->i_ump;
	fs = ip->i_lfs;
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ulfs_bmaparray: invalid arguments");
#endif

	if (runp) {
		/*
		 * XXX
		 * If MAXBSIZE is the largest transfer the disks can handle,
		 * we probably want maxrun to be 1 block less so that we
		 * don't create a block larger than the device can handle.
		 */
		*runp = 0;
		maxrun = MAXPHYS / mp->mnt_stat.f_iosize - 1;
	}
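	/*
	 * The computation above bounds the reported run length: for example
	 * (illustrative values), with MAXPHYS of 64 KB and an 8 KB file
	 * system block size, maxrun is 7, so a clustered transfer never
	 * exceeds MAXPHYS.
	 */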

	if (bn >= 0 && bn < ULFS_NDADDR) {
		if (nump != NULL)
			*nump = 0;
		if (ump->um_fstype == ULFS1)
			daddr = ulfs_fix_unwritten(ulfs_rw32(ip->i_din->u_32.di_db[bn],
			    ULFS_MPNEEDSWAP(fs)));
		else
			daddr = ulfs_rw64(ip->i_din->u_64.di_db[bn],
			    ULFS_MPNEEDSWAP(fs));
		*bnp = blkptrtodb(fs, daddr);
		/*
		 * Since this is FFS independent code, we are out of
		 * scope for the definitions of BLK_NOCOPY and
		 * BLK_SNAP, but we do know that they will fall in
		 * the range 1..um_seqinc, so we use that test and
		 * return a request for a zeroed out buffer if attempts
		 * are made to read a BLK_NOCOPY or BLK_SNAP block.
		 */
		if ((ip->i_flags & (SF_SNAPSHOT | SF_SNAPINVAL)) == SF_SNAPSHOT
		    && daddr > 0 &&
		    daddr < fs->um_seqinc) {
			*bnp = -1;
		} else if (*bnp == 0) {
			if ((ip->i_flags & (SF_SNAPSHOT | SF_SNAPINVAL))
			    == SF_SNAPSHOT) {
				*bnp = blkptrtodb(fs, bn * fs->um_seqinc);
			} else {
				*bnp = -1;
			}
		} else if (runp) {
			if (ump->um_fstype == ULFS1) {
				for (++bn; bn < ULFS_NDADDR && *runp < maxrun &&
				    is_sequential(fs,
				        ulfs_fix_unwritten(ulfs_rw32(ip->i_din->u_32.di_db[bn - 1],
				            ULFS_MPNEEDSWAP(fs))),
				        ulfs_fix_unwritten(ulfs_rw32(ip->i_din->u_32.di_db[bn],
				            ULFS_MPNEEDSWAP(fs))));
				    ++bn, ++*runp);
			} else {
				for (++bn; bn < ULFS_NDADDR && *runp < maxrun &&
				    is_sequential(fs,
				        ulfs_rw64(ip->i_din->u_64.di_db[bn - 1],
				            ULFS_MPNEEDSWAP(fs)),
				        ulfs_rw64(ip->i_din->u_64.di_db[bn],
				            ULFS_MPNEEDSWAP(fs)));
				    ++bn, ++*runp);
			}
		}
		return (0);
	}

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	if ((error = ulfs_getlbns(vp, bn, xap, nump)) != 0)
		return (error);

	num = *nump;

	/* Get disk address out of indirect block array */
	// XXX clean this up
	if (ump->um_fstype == ULFS1)
		daddr = ulfs_fix_unwritten(ulfs_rw32(ip->i_din->u_32.di_ib[xap->in_off],
		    ULFS_MPNEEDSWAP(fs)));
	else
		daddr = ulfs_rw64(ip->i_din->u_64.di_ib[xap->in_off],
		    ULFS_MPNEEDSWAP(fs));

	for (bp = NULL, ++xap; --num; ++xap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = xap->in_lbn;
		if (metalbn == bn)
			break;
		if (daddr == 0) {
			mutex_enter(&bufcache_lock);
			cbp = incore(vp, metalbn);
			mutex_exit(&bufcache_lock);
			if (cbp == NULL)
				break;
		}

		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it; go fetch it.
		 */
		if (bp)
			brelse(bp, 0);

		xap->in_exists = 1;
		bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0);
		if (bp == NULL) {

			/*
			 * getblk() above returns NULL only if we are
			 * the pagedaemon.  See the implementation of
			 * getblk for details.
			 */

			return (ENOMEM);
		}
		if (bp->b_oflags & (BO_DONE | BO_DELWRI)) {
			trace(TR_BREADHIT, pack(vp, size), metalbn);
		}
#ifdef DIAGNOSTIC
		else if (!daddr)
			panic("ulfs_bmaparray: indirect block not in cache");
#endif
		else {
			trace(TR_BREADMISS, pack(vp, size), metalbn);
			bp->b_blkno = blkptrtodb(fs, daddr);
			bp->b_flags |= B_READ;
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
			VOP_STRATEGY(vp, bp);
			curlwp->l_ru.ru_inblock++;	/* XXX */
			if ((error = biowait(bp)) != 0) {
				brelse(bp, 0);
				return (error);
			}
		}
		if (ump->um_fstype == ULFS1) {
			daddr = ulfs_fix_unwritten(ulfs_rw32(((u_int32_t *)bp->b_data)[xap->in_off],
			    ULFS_MPNEEDSWAP(fs)));
			if (num == 1 && daddr && runp) {
				for (bn = xap->in_off + 1;
				    bn < MNINDIR(fs) && *runp < maxrun &&
				    is_sequential(fs,
				        ulfs_fix_unwritten(ulfs_rw32(((int32_t *)bp->b_data)[bn-1],
				            ULFS_MPNEEDSWAP(fs))),
				        ulfs_fix_unwritten(ulfs_rw32(((int32_t *)bp->b_data)[bn],
				            ULFS_MPNEEDSWAP(fs))));
				    ++bn, ++*runp);
			}
		} else {
			daddr = ulfs_rw64(((u_int64_t *)bp->b_data)[xap->in_off],
			    ULFS_MPNEEDSWAP(fs));
			if (num == 1 && daddr && runp) {
				for (bn = xap->in_off + 1;
				    bn < MNINDIR(fs) && *runp < maxrun &&
				    is_sequential(fs,
				        ulfs_rw64(((int64_t *)bp->b_data)[bn-1],
				            ULFS_MPNEEDSWAP(fs)),
				        ulfs_rw64(((int64_t *)bp->b_data)[bn],
				            ULFS_MPNEEDSWAP(fs)));
				    ++bn, ++*runp);
			}
		}
	}
	if (bp)
		brelse(bp, 0);

	/*
	 * Since this is FFS independent code, we are out of scope for the
	 * definitions of BLK_NOCOPY and BLK_SNAP, but we do know that they
	 * will fall in the range 1..um_seqinc, so we use that test and
	 * return a request for a zeroed out buffer if attempts are made
	 * to read a BLK_NOCOPY or BLK_SNAP block.
	 */
	if ((ip->i_flags & (SF_SNAPSHOT | SF_SNAPINVAL)) == SF_SNAPSHOT
	    && daddr > 0 && daddr < fs->um_seqinc) {
		*bnp = -1;
		return (0);
	}
	*bnp = blkptrtodb(fs, daddr);
	if (*bnp == 0) {
		if ((ip->i_flags & (SF_SNAPSHOT | SF_SNAPINVAL))
		    == SF_SNAPSHOT) {
			*bnp = blkptrtodb(fs, bn * fs->um_seqinc);
		} else {
			*bnp = -1;
		}
	}
	return (0);
}

/*
 * Create an array of logical block number/offset pairs which represent the
 * path of indirect blocks required to access a data block.  The first "pair"
 * contains the logical block number of the appropriate single, double or
 * triple indirect block and the offset into the inode indirect block array.
 * Note that the logical block number of the inode single/double/triple
 * indirect block appears twice in the array, once with the offset into the
 * inode's indirect-block array (di_ib) and once with the offset into the
 * page itself.
 */
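/*
 * Worked example (assuming ULFS_NDADDR == 12 and 2048 pointers per
 * indirect block): for data block 2060, the first block reached through
 * double indirection, three entries come back:
 *
 *	{ in_lbn = -2061, in_off = 1 }	the double indirect block; offset 1
 *					into di_ib
 *	{ in_lbn = -2061, in_off = 0 }	offset 0 within the double indirect
 *					block itself
 *	{ in_lbn = -2060, in_off = 0 }	the single indirect block; offset 0
 *					within it leads to data block 2060
 */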
int
ulfs_getlbns(struct vnode *vp, daddr_t bn, struct indir *ap, int *nump)
{
	daddr_t metalbn, realbn;
	struct ulfsmount *ump;
	struct lfs *fs;
	int64_t blockcnt;
	int lbc;
	int i, numlevels, off;

	ump = VFSTOULFS(vp->v_mount);
	fs = ump->um_lfs;
	if (nump)
		*nump = 0;
	numlevels = 0;
	realbn = bn;
	if (bn < 0)
		bn = -bn;
	KASSERT(bn >= ULFS_NDADDR);

	/*
	 * Determine the number of levels of indirection.  After this loop
	 * is done, blockcnt indicates the number of data blocks possible
	 * at the given level of indirection, and ULFS_NIADDR - i is the
	 * offset into di_ib of the indirect pointer to start from (one
	 * less than the number of levels of indirection needed to locate
	 * the requested block).
	 */

	bn -= ULFS_NDADDR;
	for (lbc = 0, i = ULFS_NIADDR;; i--, bn -= blockcnt) {
		if (i == 0)
			return (EFBIG);

		lbc += fs->um_lognindir;
		blockcnt = (int64_t)1 << lbc;

		if (bn < blockcnt)
			break;
	}
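	/*
	 * For the example in the comment above the function (bn 2060 with
	 * 2048 pointers per indirect block), the loop above leaves bn == 0,
	 * lbc == 22 and i == 2, so di_ib[ULFS_NIADDR - i] == di_ib[1], the
	 * double indirect pointer.
	 */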

	/* Calculate the address of the first meta-block. */
	metalbn = -((realbn >= 0 ? realbn : -realbn) - bn + ULFS_NIADDR - i);

	/*
	 * At each iteration, off is the offset into the bap array which is
	 * an array of disk addresses at the current level of indirection.
	 * The logical block number and the offset in that block are stored
	 * into the argument array.
	 */
	ap->in_lbn = metalbn;
	ap->in_off = off = ULFS_NIADDR - i;
	ap->in_exists = 0;
	ap++;
	for (++numlevels; i <= ULFS_NIADDR; i++) {
		/* If searching for a meta-data block, quit when found. */
		if (metalbn == realbn)
			break;

		lbc -= fs->um_lognindir;
		off = (bn >> lbc) & (MNINDIR(fs) - 1);

		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;
		ap->in_exists = 0;
		++ap;

		metalbn -= -1 + ((int64_t)off << lbc);
	}
	if (nump)
		*nump = numlevels;
	return (0);
}