ffs_alloc.c revision 1.146.2.2
      1  1.146.2.2    martin /*	$NetBSD: ffs_alloc.c,v 1.146.2.2 2019/05/29 15:53:31 martin Exp $	*/
      2      1.111    simonb 
      3      1.111    simonb /*-
      4      1.122        ad  * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
      5      1.111    simonb  * All rights reserved.
      6      1.111    simonb  *
      7      1.111    simonb  * This code is derived from software contributed to The NetBSD Foundation
      8      1.111    simonb  * by Wasabi Systems, Inc.
      9      1.111    simonb  *
     10      1.111    simonb  * Redistribution and use in source and binary forms, with or without
     11      1.111    simonb  * modification, are permitted provided that the following conditions
     12      1.111    simonb  * are met:
     13      1.111    simonb  * 1. Redistributions of source code must retain the above copyright
     14      1.111    simonb  *    notice, this list of conditions and the following disclaimer.
     15      1.111    simonb  * 2. Redistributions in binary form must reproduce the above copyright
     16      1.111    simonb  *    notice, this list of conditions and the following disclaimer in the
     17      1.111    simonb  *    documentation and/or other materials provided with the distribution.
     18      1.111    simonb  *
     19      1.111    simonb  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20      1.111    simonb  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21      1.111    simonb  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22      1.111    simonb  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23      1.111    simonb  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24      1.111    simonb  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25      1.111    simonb  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26      1.111    simonb  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27      1.111    simonb  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28      1.111    simonb  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29      1.111    simonb  * POSSIBILITY OF SUCH DAMAGE.
     30      1.111    simonb  */
     31        1.2       cgd 
     32        1.1   mycroft /*
     33       1.60      fvdl  * Copyright (c) 2002 Networks Associates Technology, Inc.
     34       1.60      fvdl  * All rights reserved.
     35       1.60      fvdl  *
     36       1.60      fvdl  * This software was developed for the FreeBSD Project by Marshall
     37       1.60      fvdl  * Kirk McKusick and Network Associates Laboratories, the Security
     38       1.60      fvdl  * Research Division of Network Associates, Inc. under DARPA/SPAWAR
     39       1.60      fvdl  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
     40       1.60      fvdl  * research program
     41       1.60      fvdl  *
     42        1.1   mycroft  * Copyright (c) 1982, 1986, 1989, 1993
     43        1.1   mycroft  *	The Regents of the University of California.  All rights reserved.
     44        1.1   mycroft  *
     45        1.1   mycroft  * Redistribution and use in source and binary forms, with or without
     46        1.1   mycroft  * modification, are permitted provided that the following conditions
     47        1.1   mycroft  * are met:
     48        1.1   mycroft  * 1. Redistributions of source code must retain the above copyright
     49        1.1   mycroft  *    notice, this list of conditions and the following disclaimer.
     50        1.1   mycroft  * 2. Redistributions in binary form must reproduce the above copyright
     51        1.1   mycroft  *    notice, this list of conditions and the following disclaimer in the
     52        1.1   mycroft  *    documentation and/or other materials provided with the distribution.
     53       1.69       agc  * 3. Neither the name of the University nor the names of its contributors
     54        1.1   mycroft  *    may be used to endorse or promote products derived from this software
     55        1.1   mycroft  *    without specific prior written permission.
     56        1.1   mycroft  *
     57        1.1   mycroft  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     58        1.1   mycroft  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59        1.1   mycroft  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60        1.1   mycroft  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     61        1.1   mycroft  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     62        1.1   mycroft  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     63        1.1   mycroft  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     64        1.1   mycroft  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     65        1.1   mycroft  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     66        1.1   mycroft  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     67        1.1   mycroft  * SUCH DAMAGE.
     68        1.1   mycroft  *
     69       1.18      fvdl  *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
     70        1.1   mycroft  */
     71       1.53     lukem 
     72       1.53     lukem #include <sys/cdefs.h>
     73  1.146.2.2    martin __KERNEL_RCSID(0, "$NetBSD: ffs_alloc.c,v 1.146.2.2 2019/05/29 15:53:31 martin Exp $");
     74       1.17       mrg 
     75       1.43       mrg #if defined(_KERNEL_OPT)
     76       1.27   thorpej #include "opt_ffs.h"
     77       1.21    scottr #include "opt_quota.h"
     78      1.129       chs #include "opt_uvm_page_trkown.h"
     79       1.22    scottr #endif
     80        1.1   mycroft 
     81        1.1   mycroft #include <sys/param.h>
     82        1.1   mycroft #include <sys/systm.h>
     83        1.1   mycroft #include <sys/buf.h>
     84      1.130       tls #include <sys/cprng.h>
     85      1.111    simonb #include <sys/fstrans.h>
     86      1.111    simonb #include <sys/kauth.h>
     87      1.111    simonb #include <sys/kernel.h>
     88      1.111    simonb #include <sys/mount.h>
     89        1.1   mycroft #include <sys/proc.h>
     90      1.111    simonb #include <sys/syslog.h>
     91        1.1   mycroft #include <sys/vnode.h>
     92      1.111    simonb #include <sys/wapbl.h>
     93       1.29       mrg 
     94       1.76   hannken #include <miscfs/specfs/specdev.h>
     95        1.1   mycroft #include <ufs/ufs/quota.h>
     96       1.19    bouyer #include <ufs/ufs/ufsmount.h>
     97        1.1   mycroft #include <ufs/ufs/inode.h>
     98        1.9  christos #include <ufs/ufs/ufs_extern.h>
     99       1.19    bouyer #include <ufs/ufs/ufs_bswap.h>
    100      1.111    simonb #include <ufs/ufs/ufs_wapbl.h>
    101        1.1   mycroft 
    102        1.1   mycroft #include <ufs/ffs/fs.h>
    103        1.1   mycroft #include <ufs/ffs/ffs_extern.h>
    104        1.1   mycroft 
    105      1.129       chs #ifdef UVM_PAGE_TRKOWN
    106      1.129       chs #include <uvm/uvm.h>
    107      1.129       chs #endif
    108      1.129       chs 
    109      1.111    simonb static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int, int);
    110      1.111    simonb static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t, int);
    111       1.85   thorpej static ino_t ffs_dirpref(struct inode *);
    112       1.85   thorpej static daddr_t ffs_fragextend(struct inode *, int, daddr_t, int, int);
    113       1.85   thorpej static void ffs_fserr(struct fs *, u_int, const char *);
    114      1.111    simonb static daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int, int,
    115      1.111    simonb     daddr_t (*)(struct inode *, int, daddr_t, int, int));
    116      1.111    simonb static daddr_t ffs_nodealloccg(struct inode *, int, daddr_t, int, int);
    117       1.85   thorpej static int32_t ffs_mapsearch(struct fs *, struct cg *,
    118       1.85   thorpej 				      daddr_t, int);
    119      1.119     joerg static void ffs_blkfree_common(struct ufsmount *, struct fs *, dev_t, struct buf *,
    120      1.116     joerg     daddr_t, long, bool);
    121      1.119     joerg static void ffs_freefile_common(struct ufsmount *, struct fs *, dev_t, struct buf *, ino_t,
    122      1.119     joerg     int, bool);
    123       1.23  drochner 
     124       1.34  jdolecek /* if 1, changes in optimization strategy are logged */
    125       1.34  jdolecek int ffs_log_changeopt = 0;
    126       1.34  jdolecek 
    127       1.23  drochner /* in ffs_tables.c */
    128       1.40  jdolecek extern const int inside[], around[];
    129       1.40  jdolecek extern const u_char * const fragtbl[];
    130        1.1   mycroft 
    131      1.116     joerg /* Basic consistency check for block allocations */
    132      1.116     joerg static int
    133      1.116     joerg ffs_check_bad_allocation(const char *func, struct fs *fs, daddr_t bno,
    134      1.116     joerg     long size, dev_t dev, ino_t inum)
    135      1.116     joerg {
    136      1.134  dholland 	if ((u_int)size > fs->fs_bsize || ffs_fragoff(fs, size) != 0 ||
    137      1.138  dholland 	    ffs_fragnum(fs, bno) + ffs_numfrags(fs, size) > fs->fs_frag) {
    138      1.120  christos 		printf("dev = 0x%llx, bno = %" PRId64 " bsize = %d, "
    139      1.120  christos 		    "size = %ld, fs = %s\n",
    140      1.120  christos 		    (long long)dev, bno, fs->fs_bsize, size, fs->fs_fsmnt);
    141      1.116     joerg 		panic("%s: bad size", func);
    142      1.116     joerg 	}
    143      1.116     joerg 
    144      1.116     joerg 	if (bno >= fs->fs_size) {
    145      1.116     joerg 		printf("bad block %" PRId64 ", ino %llu\n", bno,
    146      1.116     joerg 		    (unsigned long long)inum);
    147      1.116     joerg 		ffs_fserr(fs, inum, "bad block");
    148      1.116     joerg 		return EINVAL;
    149      1.116     joerg 	}
    150      1.116     joerg 	return 0;
    151      1.116     joerg }
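
/*
 * Worked example of the check above (illustrative numbers only): with
 * fs_bsize = 16384 and fs_fsize = 2048 (so fs_frag = 8), a size of 6000
 * bytes trips the "bad size" panic because ffs_fragoff(fs, 6000) != 0,
 * and an allocation of 4 fragments starting at fragment 6 of a block
 * trips it because 6 + 4 > fs_frag.
 */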
    152      1.116     joerg 
    153        1.1   mycroft /*
    154        1.1   mycroft  * Allocate a block in the file system.
    155       1.81     perry  *
    156        1.1   mycroft  * The size of the requested block is given, which must be some
    157        1.1   mycroft  * multiple of fs_fsize and <= fs_bsize.
    158        1.1   mycroft  * A preference may be optionally specified. If a preference is given
    159        1.1   mycroft  * the following hierarchy is used to allocate a block:
    160        1.1   mycroft  *   1) allocate the requested block.
    161        1.1   mycroft  *   2) allocate a rotationally optimal block in the same cylinder.
    162        1.1   mycroft  *   3) allocate a block in the same cylinder group.
     163        1.1   mycroft  *   4) quadratically rehash into other cylinder groups, until an
    164        1.1   mycroft  *      available block is located.
    165       1.47       wiz  * If no block preference is given the following hierarchy is used
    166        1.1   mycroft  * to allocate a block:
    167        1.1   mycroft  *   1) allocate a block in the cylinder group that contains the
    168        1.1   mycroft  *      inode for the file.
     169        1.1   mycroft  *   2) quadratically rehash into other cylinder groups, until an
    170        1.1   mycroft  *      available block is located.
    171      1.106     pooka  *
    172      1.106     pooka  * => called with um_lock held
    173      1.106     pooka  * => releases um_lock before returning
    174        1.1   mycroft  */
    175        1.9  christos int
    176      1.111    simonb ffs_alloc(struct inode *ip, daddr_t lbn, daddr_t bpref, int size, int flags,
    177       1.91      elad     kauth_cred_t cred, daddr_t *bnp)
    178        1.1   mycroft {
    179      1.101        ad 	struct ufsmount *ump;
    180       1.62      fvdl 	struct fs *fs;
    181       1.58      fvdl 	daddr_t bno;
    182        1.9  christos 	int cg;
    183      1.127    bouyer #if defined(QUOTA) || defined(QUOTA2)
    184        1.9  christos 	int error;
    185        1.9  christos #endif
    186       1.81     perry 
    187       1.62      fvdl 	fs = ip->i_fs;
    188      1.101        ad 	ump = ip->i_ump;
    189      1.101        ad 
    190      1.101        ad 	KASSERT(mutex_owned(&ump->um_lock));
    191       1.62      fvdl 
    192       1.37       chs #ifdef UVM_PAGE_TRKOWN
    193      1.129       chs 
    194      1.129       chs 	/*
    195      1.129       chs 	 * Sanity-check that allocations within the file size
    196      1.129       chs 	 * do not allow other threads to read the stale contents
    197      1.129       chs 	 * of newly allocated blocks.
    198      1.129       chs 	 * Usually pages will exist to cover the new allocation.
    199      1.129       chs 	 * There is an optimization in ffs_write() where we skip
    200      1.129       chs 	 * creating pages if several conditions are met:
    201      1.129       chs 	 *  - the file must not be mapped (in any user address space).
    202      1.129       chs 	 *  - the write must cover whole pages and whole blocks.
    203      1.129       chs 	 * If those conditions are not met then pages must exist and
    204      1.129       chs 	 * be locked by the current thread.
    205      1.129       chs 	 */
    206      1.129       chs 
    207       1.51       chs 	if (ITOV(ip)->v_type == VREG &&
    208      1.137  dholland 	    ffs_lblktosize(fs, (voff_t)lbn) < round_page(ITOV(ip)->v_size)) {
    209       1.37       chs 		struct vm_page *pg;
    210      1.129       chs 		struct vnode *vp = ITOV(ip);
    211      1.129       chs 		struct uvm_object *uobj = &vp->v_uobj;
    212      1.137  dholland 		voff_t off = trunc_page(ffs_lblktosize(fs, lbn));
    213      1.137  dholland 		voff_t endoff = round_page(ffs_lblktosize(fs, lbn) + size);
    214       1.37       chs 
    215      1.128     rmind 		mutex_enter(uobj->vmobjlock);
    216       1.37       chs 		while (off < endoff) {
    217       1.37       chs 			pg = uvm_pagelookup(uobj, off);
    218      1.129       chs 			KASSERT((pg == NULL && (vp->v_vflag & VV_MAPPED) == 0 &&
    219      1.129       chs 				 (size & PAGE_MASK) == 0 &&
    220      1.135  dholland 				 ffs_blkoff(fs, size) == 0) ||
    221      1.129       chs 				(pg != NULL && pg->owner == curproc->p_pid &&
    222      1.129       chs 				 pg->lowner == curlwp->l_lid));
    223       1.37       chs 			off += PAGE_SIZE;
    224       1.37       chs 		}
    225      1.128     rmind 		mutex_exit(uobj->vmobjlock);
    226       1.37       chs 	}
    227       1.37       chs #endif
    228       1.37       chs 
    229        1.1   mycroft 	*bnp = 0;
    230        1.1   mycroft #ifdef DIAGNOSTIC
    231      1.134  dholland 	if ((u_int)size > fs->fs_bsize || ffs_fragoff(fs, size) != 0) {
    232      1.120  christos 		printf("dev = 0x%llx, bsize = %d, size = %d, fs = %s\n",
    233      1.120  christos 		    (unsigned long long)ip->i_dev, fs->fs_bsize, size,
    234      1.120  christos 		    fs->fs_fsmnt);
    235        1.1   mycroft 		panic("ffs_alloc: bad size");
    236        1.1   mycroft 	}
    237        1.1   mycroft 	if (cred == NOCRED)
    238       1.56    provos 		panic("ffs_alloc: missing credential");
    239        1.1   mycroft #endif /* DIAGNOSTIC */
    240        1.1   mycroft 	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
    241        1.1   mycroft 		goto nospace;
    242       1.99     pooka 	if (freespace(fs, fs->fs_minfree) <= 0 &&
    243      1.124      elad 	    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_RESERVEDSPACE, 0, NULL,
    244      1.124      elad 	    NULL, NULL) != 0)
    245        1.1   mycroft 		goto nospace;
    246      1.127    bouyer #if defined(QUOTA) || defined(QUOTA2)
    247      1.101        ad 	mutex_exit(&ump->um_lock);
    248       1.60      fvdl 	if ((error = chkdq(ip, btodb(size), cred, 0)) != 0)
    249        1.1   mycroft 		return (error);
    250      1.101        ad 	mutex_enter(&ump->um_lock);
    251        1.1   mycroft #endif
    252      1.111    simonb 
    253        1.1   mycroft 	if (bpref >= fs->fs_size)
    254        1.1   mycroft 		bpref = 0;
    255        1.1   mycroft 	if (bpref == 0)
    256        1.1   mycroft 		cg = ino_to_cg(fs, ip->i_number);
    257        1.1   mycroft 	else
    258        1.1   mycroft 		cg = dtog(fs, bpref);
    259      1.111    simonb 	bno = ffs_hashalloc(ip, cg, bpref, size, flags, ffs_alloccg);
    260        1.1   mycroft 	if (bno > 0) {
    261       1.65  kristerw 		DIP_ADD(ip, blocks, btodb(size));
    262        1.1   mycroft 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
    263        1.1   mycroft 		*bnp = bno;
    264        1.1   mycroft 		return (0);
    265        1.1   mycroft 	}
    266      1.127    bouyer #if defined(QUOTA) || defined(QUOTA2)
    267        1.1   mycroft 	/*
    268        1.1   mycroft 	 * Restore user's disk quota because allocation failed.
    269        1.1   mycroft 	 */
    270       1.60      fvdl 	(void) chkdq(ip, -btodb(size), cred, FORCE);
    271        1.1   mycroft #endif
    272      1.111    simonb 	if (flags & B_CONTIG) {
    273      1.111    simonb 		/*
    274      1.111    simonb 		 * XXX ump->um_lock handling is "suspect" at best.
    275      1.111    simonb 		 * For the case where ffs_hashalloc() fails early
    276      1.111    simonb 		 * in the B_CONTIG case we reach here with um_lock
    277      1.111    simonb 		 * already unlocked, so we can't release it again
    278      1.111    simonb 		 * like in the normal error path.  See kern/39206.
     279      1.111    simonb 		 *
    281      1.111    simonb 		 * Fail silently - it's up to our caller to report
    282      1.111    simonb 		 * errors.
    283      1.111    simonb 		 */
    284      1.111    simonb 		return (ENOSPC);
    285      1.111    simonb 	}
    286        1.1   mycroft nospace:
    287      1.101        ad 	mutex_exit(&ump->um_lock);
    288       1.91      elad 	ffs_fserr(fs, kauth_cred_geteuid(cred), "file system full");
    289        1.1   mycroft 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
    290        1.1   mycroft 	return (ENOSPC);
    291        1.1   mycroft }
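
#if 0
/*
 * Illustrative sketch only (not compiled): roughly how a caller might
 * drive ffs_alloc() for one full-sized data block.  The wrapper name and
 * parameters below are hypothetical; the real callers live in
 * ffs_balloc.c, and a UFS2 caller would use ffs_blkpref_ufs2() instead.
 */
static int
example_alloc_data_block(struct inode *ip, daddr_t lbn, int flags,
    kauth_cred_t cred, daddr_t *bnop)
{
	struct fs *fs = ip->i_fs;
	daddr_t pref;

	/* ffs_alloc() expects um_lock held and releases it before return. */
	mutex_enter(&ip->i_ump->um_lock);
	pref = ffs_blkpref_ufs1(ip, lbn, 0, flags, NULL);
	return ffs_alloc(ip, lbn, pref, fs->fs_bsize, flags, cred, bnop);
}
#endif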
    292        1.1   mycroft 
    293        1.1   mycroft /*
    294        1.1   mycroft  * Reallocate a fragment to a bigger size
    295        1.1   mycroft  *
    296        1.1   mycroft  * The number and size of the old block is given, and a preference
    297        1.1   mycroft  * and new size is also specified. The allocator attempts to extend
    298        1.1   mycroft  * the original block. Failing that, the regular block allocator is
    299        1.1   mycroft  * invoked to get an appropriate block.
    300      1.106     pooka  *
    301      1.106     pooka  * => called with um_lock held
    302      1.106     pooka  * => return with um_lock released
    303        1.1   mycroft  */
    304        1.9  christos int
    305       1.85   thorpej ffs_realloccg(struct inode *ip, daddr_t lbprev, daddr_t bpref, int osize,
    306       1.91      elad     int nsize, kauth_cred_t cred, struct buf **bpp, daddr_t *blknop)
    307        1.1   mycroft {
    308      1.101        ad 	struct ufsmount *ump;
    309       1.62      fvdl 	struct fs *fs;
    310        1.1   mycroft 	struct buf *bp;
    311        1.1   mycroft 	int cg, request, error;
    312       1.58      fvdl 	daddr_t bprev, bno;
    313       1.25   thorpej 
    314       1.62      fvdl 	fs = ip->i_fs;
    315      1.101        ad 	ump = ip->i_ump;
    316      1.101        ad 
    317      1.101        ad 	KASSERT(mutex_owned(&ump->um_lock));
    318      1.101        ad 
    319       1.37       chs #ifdef UVM_PAGE_TRKOWN
    320      1.129       chs 
    321      1.129       chs 	/*
    322      1.129       chs 	 * Sanity-check that allocations within the file size
    323      1.129       chs 	 * do not allow other threads to read the stale contents
    324      1.129       chs 	 * of newly allocated blocks.
    325      1.129       chs 	 * Unlike in ffs_alloc(), here pages must always exist
    326      1.129       chs 	 * for such allocations, because only the last block of a file
    327      1.129       chs 	 * can be a fragment and ffs_write() will reallocate the
    328      1.129       chs 	 * fragment to the new size using ufs_balloc_range(),
    329      1.129       chs 	 * which always creates pages to cover blocks it allocates.
    330      1.129       chs 	 */
    331      1.129       chs 
    332       1.37       chs 	if (ITOV(ip)->v_type == VREG) {
    333       1.37       chs 		struct vm_page *pg;
    334       1.51       chs 		struct uvm_object *uobj = &ITOV(ip)->v_uobj;
    335      1.137  dholland 		voff_t off = trunc_page(ffs_lblktosize(fs, lbprev));
    336      1.137  dholland 		voff_t endoff = round_page(ffs_lblktosize(fs, lbprev) + osize);
    337       1.37       chs 
    338      1.128     rmind 		mutex_enter(uobj->vmobjlock);
    339       1.37       chs 		while (off < endoff) {
    340       1.37       chs 			pg = uvm_pagelookup(uobj, off);
    341      1.129       chs 			KASSERT(pg->owner == curproc->p_pid &&
    342      1.129       chs 				pg->lowner == curlwp->l_lid);
    343       1.37       chs 			off += PAGE_SIZE;
    344       1.37       chs 		}
    345      1.128     rmind 		mutex_exit(uobj->vmobjlock);
    346       1.37       chs 	}
    347       1.37       chs #endif
    348       1.37       chs 
    349        1.1   mycroft #ifdef DIAGNOSTIC
    350      1.134  dholland 	if ((u_int)osize > fs->fs_bsize || ffs_fragoff(fs, osize) != 0 ||
    351      1.134  dholland 	    (u_int)nsize > fs->fs_bsize || ffs_fragoff(fs, nsize) != 0) {
    352       1.13  christos 		printf(
    353      1.120  christos 		    "dev = 0x%llx, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
    354      1.120  christos 		    (unsigned long long)ip->i_dev, fs->fs_bsize, osize, nsize,
    355      1.120  christos 		    fs->fs_fsmnt);
    356        1.1   mycroft 		panic("ffs_realloccg: bad size");
    357        1.1   mycroft 	}
    358        1.1   mycroft 	if (cred == NOCRED)
    359       1.56    provos 		panic("ffs_realloccg: missing credential");
    360        1.1   mycroft #endif /* DIAGNOSTIC */
    361       1.99     pooka 	if (freespace(fs, fs->fs_minfree) <= 0 &&
    362      1.124      elad 	    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_RESERVEDSPACE, 0, NULL,
    363      1.124      elad 	    NULL, NULL) != 0) {
    364      1.101        ad 		mutex_exit(&ump->um_lock);
    365        1.1   mycroft 		goto nospace;
    366      1.101        ad 	}
    367       1.60      fvdl 	if (fs->fs_magic == FS_UFS2_MAGIC)
    368       1.60      fvdl 		bprev = ufs_rw64(ip->i_ffs2_db[lbprev], UFS_FSNEEDSWAP(fs));
    369       1.60      fvdl 	else
    370       1.60      fvdl 		bprev = ufs_rw32(ip->i_ffs1_db[lbprev], UFS_FSNEEDSWAP(fs));
    371       1.60      fvdl 
    372       1.60      fvdl 	if (bprev == 0) {
    373      1.120  christos 		printf("dev = 0x%llx, bsize = %d, bprev = %" PRId64 ", fs = %s\n",
    374      1.120  christos 		    (unsigned long long)ip->i_dev, fs->fs_bsize, bprev,
    375      1.120  christos 		    fs->fs_fsmnt);
    376        1.1   mycroft 		panic("ffs_realloccg: bad bprev");
    377        1.1   mycroft 	}
    378      1.101        ad 	mutex_exit(&ump->um_lock);
    379      1.101        ad 
    380        1.1   mycroft 	/*
    381        1.1   mycroft 	 * Allocate the extra space in the buffer.
    382        1.1   mycroft 	 */
    383       1.37       chs 	if (bpp != NULL &&
    384      1.107   hannken 	    (error = bread(ITOV(ip), lbprev, osize, NOCRED, 0, &bp)) != 0) {
    385        1.1   mycroft 		return (error);
    386        1.1   mycroft 	}
    387      1.127    bouyer #if defined(QUOTA) || defined(QUOTA2)
    388       1.60      fvdl 	if ((error = chkdq(ip, btodb(nsize - osize), cred, 0)) != 0) {
    389       1.44       chs 		if (bpp != NULL) {
    390      1.101        ad 			brelse(bp, 0);
    391       1.44       chs 		}
    392        1.1   mycroft 		return (error);
    393        1.1   mycroft 	}
    394        1.1   mycroft #endif
    395        1.1   mycroft 	/*
    396        1.1   mycroft 	 * Check for extension in the existing location.
    397        1.1   mycroft 	 */
    398        1.1   mycroft 	cg = dtog(fs, bprev);
    399      1.101        ad 	mutex_enter(&ump->um_lock);
    400       1.60      fvdl 	if ((bno = ffs_fragextend(ip, cg, bprev, osize, nsize)) != 0) {
    401       1.65  kristerw 		DIP_ADD(ip, blocks, btodb(nsize - osize));
    402        1.1   mycroft 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
    403       1.37       chs 
    404       1.37       chs 		if (bpp != NULL) {
    405      1.136  dholland 			if (bp->b_blkno != FFS_FSBTODB(fs, bno))
    406       1.37       chs 				panic("bad blockno");
    407       1.72        pk 			allocbuf(bp, nsize, 1);
    408       1.98  christos 			memset((char *)bp->b_data + osize, 0, nsize - osize);
    409      1.105        ad 			mutex_enter(bp->b_objlock);
    410      1.109        ad 			KASSERT(!cv_has_waiters(&bp->b_done));
    411      1.105        ad 			bp->b_oflags |= BO_DONE;
    412      1.105        ad 			mutex_exit(bp->b_objlock);
    413       1.37       chs 			*bpp = bp;
    414       1.37       chs 		}
    415       1.37       chs 		if (blknop != NULL) {
    416       1.37       chs 			*blknop = bno;
    417       1.37       chs 		}
    418        1.1   mycroft 		return (0);
    419        1.1   mycroft 	}
    420        1.1   mycroft 	/*
    421        1.1   mycroft 	 * Allocate a new disk location.
    422        1.1   mycroft 	 */
    423        1.1   mycroft 	if (bpref >= fs->fs_size)
    424        1.1   mycroft 		bpref = 0;
    425        1.1   mycroft 	switch ((int)fs->fs_optim) {
    426        1.1   mycroft 	case FS_OPTSPACE:
    427        1.1   mycroft 		/*
    428       1.81     perry 		 * Allocate an exact sized fragment. Although this makes
    429       1.81     perry 		 * best use of space, we will waste time relocating it if
    430        1.1   mycroft 		 * the file continues to grow. If the fragmentation is
    431        1.1   mycroft 		 * less than half of the minimum free reserve, we choose
    432        1.1   mycroft 		 * to begin optimizing for time.
    433        1.1   mycroft 		 */
    434        1.1   mycroft 		request = nsize;
    435        1.1   mycroft 		if (fs->fs_minfree < 5 ||
    436        1.1   mycroft 		    fs->fs_cstotal.cs_nffree >
    437        1.1   mycroft 		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
    438        1.1   mycroft 			break;
    439       1.34  jdolecek 
    440       1.34  jdolecek 		if (ffs_log_changeopt) {
    441       1.34  jdolecek 			log(LOG_NOTICE,
    442       1.34  jdolecek 				"%s: optimization changed from SPACE to TIME\n",
    443       1.34  jdolecek 				fs->fs_fsmnt);
    444       1.34  jdolecek 		}
    445       1.34  jdolecek 
    446        1.1   mycroft 		fs->fs_optim = FS_OPTTIME;
    447        1.1   mycroft 		break;
    448        1.1   mycroft 	case FS_OPTTIME:
    449        1.1   mycroft 		/*
    450        1.1   mycroft 		 * At this point we have discovered a file that is trying to
    451        1.1   mycroft 		 * grow a small fragment to a larger fragment. To save time,
    452        1.1   mycroft 		 * we allocate a full sized block, then free the unused portion.
    453        1.1   mycroft 		 * If the file continues to grow, the `ffs_fragextend' call
    454        1.1   mycroft 		 * above will be able to grow it in place without further
    455        1.1   mycroft 		 * copying. If aberrant programs cause disk fragmentation to
    456        1.1   mycroft 		 * grow within 2% of the free reserve, we choose to begin
    457        1.1   mycroft 		 * optimizing for space.
    458        1.1   mycroft 		 */
    459        1.1   mycroft 		request = fs->fs_bsize;
    460        1.1   mycroft 		if (fs->fs_cstotal.cs_nffree <
    461        1.1   mycroft 		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
    462        1.1   mycroft 			break;
    463       1.34  jdolecek 
    464       1.34  jdolecek 		if (ffs_log_changeopt) {
    465       1.34  jdolecek 			log(LOG_NOTICE,
    466       1.34  jdolecek 				"%s: optimization changed from TIME to SPACE\n",
    467       1.34  jdolecek 				fs->fs_fsmnt);
    468       1.34  jdolecek 		}
    469       1.34  jdolecek 
    470        1.1   mycroft 		fs->fs_optim = FS_OPTSPACE;
    471        1.1   mycroft 		break;
    472        1.1   mycroft 	default:
    473      1.120  christos 		printf("dev = 0x%llx, optim = %d, fs = %s\n",
    474      1.120  christos 		    (unsigned long long)ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
    475        1.1   mycroft 		panic("ffs_realloccg: bad optim");
    476        1.1   mycroft 		/* NOTREACHED */
    477        1.1   mycroft 	}
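	/*
	 * Worked example of the switch above (illustrative numbers only):
	 * with fs_dsize = 1000000 fragments and fs_minfree = 5%, a
	 * space-optimized file system flips to FS_OPTTIME once cs_nffree
	 * drops to 1000000 * 5 / 200 = 25000 fragments or fewer, and a
	 * time-optimized one flips back to FS_OPTSPACE once cs_nffree
	 * reaches 1000000 * (5 - 2) / 100 = 30000; the gap between the
	 * two thresholds provides hysteresis.
	 */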
    478      1.111    simonb 	bno = ffs_hashalloc(ip, cg, bpref, request, 0, ffs_alloccg);
    479        1.1   mycroft 	if (bno > 0) {
    480      1.122        ad 		if ((ip->i_ump->um_mountp->mnt_wapbl) &&
    481      1.122        ad 		    (ITOV(ip)->v_type != VREG)) {
    482      1.122        ad 			UFS_WAPBL_REGISTER_DEALLOCATION(
    483      1.136  dholland 			    ip->i_ump->um_mountp, FFS_FSBTODB(fs, bprev),
    484      1.122        ad 			    osize);
    485      1.122        ad 		} else {
    486      1.122        ad 			ffs_blkfree(fs, ip->i_devvp, bprev, (long)osize,
    487      1.122        ad 			    ip->i_number);
    488      1.111    simonb 		}
    489      1.111    simonb 		if (nsize < request) {
    490      1.111    simonb 			if ((ip->i_ump->um_mountp->mnt_wapbl) &&
    491      1.111    simonb 			    (ITOV(ip)->v_type != VREG)) {
    492      1.111    simonb 				UFS_WAPBL_REGISTER_DEALLOCATION(
    493      1.111    simonb 				    ip->i_ump->um_mountp,
    494      1.137  dholland 				    FFS_FSBTODB(fs, (bno + ffs_numfrags(fs, nsize))),
    495      1.111    simonb 				    request - nsize);
    496      1.111    simonb 			} else
    497      1.111    simonb 				ffs_blkfree(fs, ip->i_devvp,
    498      1.137  dholland 				    bno + ffs_numfrags(fs, nsize),
    499      1.111    simonb 				    (long)(request - nsize), ip->i_number);
    500      1.111    simonb 		}
    501       1.65  kristerw 		DIP_ADD(ip, blocks, btodb(nsize - osize));
    502        1.1   mycroft 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
    503       1.37       chs 		if (bpp != NULL) {
    504      1.136  dholland 			bp->b_blkno = FFS_FSBTODB(fs, bno);
    505       1.72        pk 			allocbuf(bp, nsize, 1);
    506       1.98  christos 			memset((char *)bp->b_data + osize, 0, (u_int)nsize - osize);
    507      1.105        ad 			mutex_enter(bp->b_objlock);
    508      1.109        ad 			KASSERT(!cv_has_waiters(&bp->b_done));
    509      1.105        ad 			bp->b_oflags |= BO_DONE;
    510      1.105        ad 			mutex_exit(bp->b_objlock);
    511       1.37       chs 			*bpp = bp;
    512       1.37       chs 		}
    513       1.37       chs 		if (blknop != NULL) {
    514       1.37       chs 			*blknop = bno;
    515       1.37       chs 		}
    516        1.1   mycroft 		return (0);
    517        1.1   mycroft 	}
    518      1.101        ad 	mutex_exit(&ump->um_lock);
    519      1.101        ad 
    520      1.127    bouyer #if defined(QUOTA) || defined(QUOTA2)
    521        1.1   mycroft 	/*
    522        1.1   mycroft 	 * Restore user's disk quota because allocation failed.
    523        1.1   mycroft 	 */
    524       1.60      fvdl 	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
    525        1.1   mycroft #endif
    526       1.37       chs 	if (bpp != NULL) {
    527      1.101        ad 		brelse(bp, 0);
    528       1.37       chs 	}
    529       1.37       chs 
    530        1.1   mycroft nospace:
    531        1.1   mycroft 	/*
    532        1.1   mycroft 	 * no space available
    533        1.1   mycroft 	 */
    534       1.91      elad 	ffs_fserr(fs, kauth_cred_geteuid(cred), "file system full");
    535        1.1   mycroft 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
    536        1.1   mycroft 	return (ENOSPC);
    537        1.1   mycroft }
    538        1.1   mycroft 
    539        1.1   mycroft /*
    540        1.1   mycroft  * Allocate an inode in the file system.
    541       1.81     perry  *
    542        1.1   mycroft  * If allocating a directory, use ffs_dirpref to select the inode.
    543        1.1   mycroft  * If allocating in a directory, the following hierarchy is followed:
    544        1.1   mycroft  *   1) allocate the preferred inode.
    545        1.1   mycroft  *   2) allocate an inode in the same cylinder group.
     546        1.1   mycroft  *   3) quadratically rehash into other cylinder groups, until an
    547        1.1   mycroft  *      available inode is located.
    548       1.47       wiz  * If no inode preference is given the following hierarchy is used
    549        1.1   mycroft  * to allocate an inode:
    550        1.1   mycroft  *   1) allocate an inode in cylinder group 0.
     551        1.1   mycroft  *   2) quadratically rehash into other cylinder groups, until an
    552        1.1   mycroft  *      available inode is located.
    553      1.106     pooka  *
    554      1.106     pooka  * => um_lock not held upon entry or return
    555        1.1   mycroft  */
    556        1.9  christos int
    557       1.91      elad ffs_valloc(struct vnode *pvp, int mode, kauth_cred_t cred,
    558       1.88      yamt     struct vnode **vpp)
    559        1.9  christos {
    560      1.101        ad 	struct ufsmount *ump;
    561       1.33  augustss 	struct inode *pip;
    562       1.33  augustss 	struct fs *fs;
    563       1.33  augustss 	struct inode *ip;
    564       1.60      fvdl 	struct timespec ts;
    565        1.1   mycroft 	ino_t ino, ipref;
    566        1.1   mycroft 	int cg, error;
    567       1.81     perry 
    568      1.111    simonb 	UFS_WAPBL_JUNLOCK_ASSERT(pvp->v_mount);
    569      1.111    simonb 
    570       1.88      yamt 	*vpp = NULL;
    571        1.1   mycroft 	pip = VTOI(pvp);
    572        1.1   mycroft 	fs = pip->i_fs;
    573      1.101        ad 	ump = pip->i_ump;
    574      1.101        ad 
    575      1.111    simonb 	error = UFS_WAPBL_BEGIN(pvp->v_mount);
    576      1.111    simonb 	if (error) {
    577      1.111    simonb 		return error;
    578      1.111    simonb 	}
    579      1.101        ad 	mutex_enter(&ump->um_lock);
    580        1.1   mycroft 	if (fs->fs_cstotal.cs_nifree == 0)
    581        1.1   mycroft 		goto noinodes;
    582        1.1   mycroft 
    583        1.1   mycroft 	if ((mode & IFMT) == IFDIR)
    584       1.50     lukem 		ipref = ffs_dirpref(pip);
    585       1.50     lukem 	else
    586       1.50     lukem 		ipref = pip->i_number;
    587        1.1   mycroft 	if (ipref >= fs->fs_ncg * fs->fs_ipg)
    588        1.1   mycroft 		ipref = 0;
    589        1.1   mycroft 	cg = ino_to_cg(fs, ipref);
    590       1.50     lukem 	/*
     591       1.50     lukem 	 * Track the number of dirs created one after another
     592       1.50     lukem 	 * in the same cg without intervening file allocations.
    593       1.50     lukem 	 */
    594       1.50     lukem 	if ((mode & IFMT) == IFDIR) {
    595       1.63      fvdl 		if (fs->fs_contigdirs[cg] < 255)
    596       1.50     lukem 			fs->fs_contigdirs[cg]++;
    597       1.50     lukem 	} else {
    598       1.50     lukem 		if (fs->fs_contigdirs[cg] > 0)
    599       1.50     lukem 			fs->fs_contigdirs[cg]--;
    600       1.50     lukem 	}
    601      1.111    simonb 	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0, ffs_nodealloccg);
    602        1.1   mycroft 	if (ino == 0)
    603        1.1   mycroft 		goto noinodes;
    604      1.111    simonb 	UFS_WAPBL_END(pvp->v_mount);
    605       1.88      yamt 	error = VFS_VGET(pvp->v_mount, ino, vpp);
    606        1.1   mycroft 	if (error) {
    607      1.111    simonb 		int err;
    608      1.111    simonb 		err = UFS_WAPBL_BEGIN(pvp->v_mount);
    609      1.111    simonb 		if (err == 0)
    610      1.111    simonb 			ffs_vfree(pvp, ino, mode);
    611      1.111    simonb 		if (err == 0)
    612      1.111    simonb 			UFS_WAPBL_END(pvp->v_mount);
    613        1.1   mycroft 		return (error);
    614        1.1   mycroft 	}
    615       1.90      yamt 	KASSERT((*vpp)->v_type == VNON);
    616       1.88      yamt 	ip = VTOI(*vpp);
    617       1.60      fvdl 	if (ip->i_mode) {
    618       1.60      fvdl #if 0
    619       1.13  christos 		printf("mode = 0%o, inum = %d, fs = %s\n",
    620       1.60      fvdl 		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
    621       1.60      fvdl #else
    622       1.60      fvdl 		printf("dmode %x mode %x dgen %x gen %x\n",
    623       1.60      fvdl 		    DIP(ip, mode), ip->i_mode,
    624       1.60      fvdl 		    DIP(ip, gen), ip->i_gen);
    625       1.60      fvdl 		printf("size %llx blocks %llx\n",
    626       1.60      fvdl 		    (long long)DIP(ip, size), (long long)DIP(ip, blocks));
    627       1.86  christos 		printf("ino %llu ipref %llu\n", (unsigned long long)ino,
    628       1.86  christos 		    (unsigned long long)ipref);
    629       1.60      fvdl #if 0
    630      1.136  dholland 		error = bread(ump->um_devvp, FFS_FSBTODB(fs, ino_to_fsba(fs, ino)),
    631      1.107   hannken 		    (int)fs->fs_bsize, NOCRED, 0, &bp);
    632       1.60      fvdl #endif
    633       1.60      fvdl 
    634       1.60      fvdl #endif
    635        1.1   mycroft 		panic("ffs_valloc: dup alloc");
    636        1.1   mycroft 	}
    637       1.60      fvdl 	if (DIP(ip, blocks)) {				/* XXX */
    638      1.145  dholland 		printf("free inode %llu on %s had %" PRId64 " blocks\n",
    639      1.145  dholland 		    (unsigned long long)ino, fs->fs_fsmnt, DIP(ip, blocks));
    640       1.65  kristerw 		DIP_ASSIGN(ip, blocks, 0);
    641        1.1   mycroft 	}
    642       1.57   hannken 	ip->i_flag &= ~IN_SPACECOUNTED;
    643       1.61      fvdl 	ip->i_flags = 0;
    644       1.65  kristerw 	DIP_ASSIGN(ip, flags, 0);
    645        1.1   mycroft 	/*
    646        1.1   mycroft 	 * Set up a new generation number for this inode.
    647        1.1   mycroft 	 */
    648       1.60      fvdl 	ip->i_gen++;
    649       1.65  kristerw 	DIP_ASSIGN(ip, gen, ip->i_gen);
    650       1.60      fvdl 	if (fs->fs_magic == FS_UFS2_MAGIC) {
    651       1.93      yamt 		vfs_timestamp(&ts);
    652       1.60      fvdl 		ip->i_ffs2_birthtime = ts.tv_sec;
    653       1.60      fvdl 		ip->i_ffs2_birthnsec = ts.tv_nsec;
    654       1.60      fvdl 	}
    655        1.1   mycroft 	return (0);
    656        1.1   mycroft noinodes:
    657      1.101        ad 	mutex_exit(&ump->um_lock);
    658      1.111    simonb 	UFS_WAPBL_END(pvp->v_mount);
    659       1.91      elad 	ffs_fserr(fs, kauth_cred_geteuid(cred), "out of inodes");
    660        1.1   mycroft 	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
    661        1.1   mycroft 	return (ENOSPC);
    662        1.1   mycroft }
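
#if 0
/*
 * Illustrative sketch only (not compiled): how a create path might obtain
 * a fresh inode from this allocator.  The wrapper name and arguments are
 * hypothetical; real callers normally reach ffs_valloc() through the
 * UFS_VALLOC() hook from the ufs name-space code.
 */
static int
example_new_inode(struct vnode *dvp, int mode, kauth_cred_t cred,
    struct vnode **vpp)
{
	int error;

	/* No WAPBL transaction may be held here; ffs_valloc() runs its own. */
	error = ffs_valloc(dvp, mode, cred, vpp);
	if (error == 0) {
		/* The new inode's mode is still zero; the caller fills it in. */
		KASSERT(VTOI(*vpp)->i_mode == 0);
	}
	return error;
}
#endif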
    663        1.1   mycroft 
    664        1.1   mycroft /*
    665       1.50     lukem  * Find a cylinder group in which to place a directory.
    666       1.42  sommerfe  *
    667       1.50     lukem  * The policy implemented by this algorithm is to allocate a
    668       1.50     lukem  * directory inode in the same cylinder group as its parent
     669       1.50     lukem  * directory, but also to reserve space for its files' inodes
    670       1.50     lukem  * and data. Restrict the number of directories which may be
    671       1.50     lukem  * allocated one after another in the same cylinder group
    672       1.50     lukem  * without intervening allocation of files.
    673       1.42  sommerfe  *
    674       1.50     lukem  * If we allocate a first level directory then force allocation
    675       1.50     lukem  * in another cylinder group.
    676        1.1   mycroft  */
    677        1.1   mycroft static ino_t
    678       1.85   thorpej ffs_dirpref(struct inode *pip)
    679        1.1   mycroft {
    680       1.50     lukem 	register struct fs *fs;
    681       1.74     soren 	int cg, prefcg;
    682       1.89       dsl 	int64_t dirsize, cgsize, curdsz;
    683       1.89       dsl 	int avgifree, avgbfree, avgndir;
    684       1.50     lukem 	int minifree, minbfree, maxndir;
    685       1.50     lukem 	int mincg, minndir;
    686       1.50     lukem 	int maxcontigdirs;
    687       1.50     lukem 
    688      1.101        ad 	KASSERT(mutex_owned(&pip->i_ump->um_lock));
    689      1.101        ad 
    690       1.50     lukem 	fs = pip->i_fs;
    691        1.1   mycroft 
    692        1.1   mycroft 	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
    693       1.50     lukem 	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
    694       1.50     lukem 	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
    695       1.50     lukem 
    696       1.50     lukem 	/*
    697       1.50     lukem 	 * Force allocation in another cg if creating a first level dir.
    698       1.50     lukem 	 */
    699      1.102        ad 	if (ITOV(pip)->v_vflag & VV_ROOT) {
    700       1.71   mycroft 		prefcg = random() % fs->fs_ncg;
    701       1.50     lukem 		mincg = prefcg;
    702       1.50     lukem 		minndir = fs->fs_ipg;
    703       1.50     lukem 		for (cg = prefcg; cg < fs->fs_ncg; cg++)
    704       1.50     lukem 			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
    705       1.50     lukem 			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
    706       1.50     lukem 			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
    707       1.42  sommerfe 				mincg = cg;
    708       1.50     lukem 				minndir = fs->fs_cs(fs, cg).cs_ndir;
    709       1.42  sommerfe 			}
    710       1.50     lukem 		for (cg = 0; cg < prefcg; cg++)
    711       1.50     lukem 			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
    712       1.50     lukem 			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
    713       1.50     lukem 			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
    714       1.50     lukem 				mincg = cg;
    715       1.50     lukem 				minndir = fs->fs_cs(fs, cg).cs_ndir;
    716       1.42  sommerfe 			}
    717       1.50     lukem 		return ((ino_t)(fs->fs_ipg * mincg));
    718       1.42  sommerfe 	}
    719       1.50     lukem 
    720       1.50     lukem 	/*
     721       1.50     lukem 	 * Count various limits which are used for
    722       1.50     lukem 	 * optimal allocation of a directory inode.
    723      1.144       bad 	 * Try cylinder groups with >75% avgifree and avgbfree.
    724      1.144       bad 	 * Avoid cylinder groups with no free blocks or inodes as that
    725      1.144       bad 	 * triggers an I/O-expensive cylinder group scan.
    726       1.50     lukem 	 */
    727       1.50     lukem 	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
    728      1.144       bad 	minifree = avgifree - avgifree / 4;
    729      1.144       bad 	if (minifree < 1)
    730      1.144       bad 		minifree = 1;
    731      1.144       bad 	minbfree = avgbfree - avgbfree / 4;
    732      1.144       bad 	if (minbfree < 1)
    733      1.144       bad 		minbfree = 1;
    734       1.89       dsl 	cgsize = (int64_t)fs->fs_fsize * fs->fs_fpg;
    735       1.89       dsl 	dirsize = (int64_t)fs->fs_avgfilesize * fs->fs_avgfpdir;
    736       1.89       dsl 	if (avgndir != 0) {
    737       1.89       dsl 		curdsz = (cgsize - (int64_t)avgbfree * fs->fs_bsize) / avgndir;
    738       1.89       dsl 		if (dirsize < curdsz)
    739       1.89       dsl 			dirsize = curdsz;
    740       1.89       dsl 	}
    741       1.89       dsl 	if (cgsize < dirsize * 255)
    742      1.144       bad 		maxcontigdirs = (avgbfree * fs->fs_bsize) / dirsize;
    743       1.89       dsl 	else
    744       1.89       dsl 		maxcontigdirs = 255;
    745       1.50     lukem 	if (fs->fs_avgfpdir > 0)
    746       1.50     lukem 		maxcontigdirs = min(maxcontigdirs,
    747       1.50     lukem 				    fs->fs_ipg / fs->fs_avgfpdir);
    748       1.50     lukem 	if (maxcontigdirs == 0)
    749       1.50     lukem 		maxcontigdirs = 1;
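
	/*
	 * Worked example of the limits above (illustrative numbers only):
	 * with fs_fsize = 2048 and fs_fpg = 65536 (cgsize = 128MB), and
	 * fs_avgfilesize = 16384 with fs_avgfpdir = 64, dirsize starts at
	 * 16384 * 64 = 1MB.  If the average cg has 4096 free 16KB blocks
	 * (64MB) and 100 directories, curdsz = (128MB - 64MB) / 100 is
	 * smaller, so dirsize stays 1MB and maxcontigdirs becomes
	 * 64MB / 1MB = 64 (further clamped by fs_ipg / fs_avgfpdir):
	 * up to 64 directories may be created back to back in one cg
	 * before fs_contigdirs[] steers the next one elsewhere.
	 */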
    750       1.50     lukem 
    751       1.50     lukem 	/*
    752       1.81     perry 	 * Limit number of dirs in one cg and reserve space for
    753       1.50     lukem 	 * regular files, but only if we have no deficit in
    754       1.50     lukem 	 * inodes or space.
    755       1.50     lukem 	 */
    756       1.50     lukem 	prefcg = ino_to_cg(fs, pip->i_number);
    757       1.50     lukem 	for (cg = prefcg; cg < fs->fs_ncg; cg++)
    758       1.50     lukem 		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
    759       1.50     lukem 		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
    760       1.50     lukem 	    	    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
    761       1.50     lukem 			if (fs->fs_contigdirs[cg] < maxcontigdirs)
    762       1.50     lukem 				return ((ino_t)(fs->fs_ipg * cg));
    763       1.50     lukem 		}
    764       1.50     lukem 	for (cg = 0; cg < prefcg; cg++)
    765       1.50     lukem 		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
    766       1.50     lukem 		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
    767       1.50     lukem 	    	    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
    768       1.50     lukem 			if (fs->fs_contigdirs[cg] < maxcontigdirs)
    769       1.50     lukem 				return ((ino_t)(fs->fs_ipg * cg));
    770       1.50     lukem 		}
    771       1.50     lukem 	/*
    772       1.50     lukem 	 * This is a backstop when we are deficient in space.
    773       1.50     lukem 	 */
    774       1.50     lukem 	for (cg = prefcg; cg < fs->fs_ncg; cg++)
    775       1.50     lukem 		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
    776       1.50     lukem 			return ((ino_t)(fs->fs_ipg * cg));
    777       1.50     lukem 	for (cg = 0; cg < prefcg; cg++)
    778       1.50     lukem 		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
    779       1.50     lukem 			break;
    780       1.50     lukem 	return ((ino_t)(fs->fs_ipg * cg));
    781        1.1   mycroft }
    782        1.1   mycroft 
    783        1.1   mycroft /*
    784        1.1   mycroft  * Select the desired position for the next block in a file.  The file is
    785        1.1   mycroft  * logically divided into sections. The first section is composed of the
    786        1.1   mycroft  * direct blocks. Each additional section contains fs_maxbpg blocks.
    787       1.81     perry  *
    788        1.1   mycroft  * If no blocks have been allocated in the first section, the policy is to
    789        1.1   mycroft  * request a block in the same cylinder group as the inode that describes
    790        1.1   mycroft  * the file. If no blocks have been allocated in any other section, the
    791        1.1   mycroft  * policy is to place the section in a cylinder group with a greater than
    792        1.1   mycroft  * average number of free blocks.  An appropriate cylinder group is found
    793        1.1   mycroft  * by using a rotor that sweeps the cylinder groups. When a new group of
    794        1.1   mycroft  * blocks is needed, the sweep begins in the cylinder group following the
    795        1.1   mycroft  * cylinder group from which the previous allocation was made. The sweep
    796        1.1   mycroft  * continues until a cylinder group with greater than the average number
    797        1.1   mycroft  * of free blocks is found. If the allocation is for the first block in an
    798        1.1   mycroft  * indirect block, the information on the previous allocation is unavailable;
    799        1.1   mycroft  * here a best guess is made based upon the logical block number being
    800        1.1   mycroft  * allocated.
    801       1.81     perry  *
    802        1.1   mycroft  * If a section is already partially allocated, the policy is to
    803        1.1   mycroft  * contiguously allocate fs_maxcontig blocks.  The end of one of these
    804       1.60      fvdl  * contiguous blocks and the beginning of the next is laid out
     805       1.60      fvdl  * contiguously if possible.
    806      1.106     pooka  *
    807      1.106     pooka  * => um_lock held on entry and exit
    808        1.1   mycroft  */
    809       1.58      fvdl daddr_t
    810      1.111    simonb ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int flags,
    811       1.85   thorpej     int32_t *bap /* XXX ondisk32 */)
    812        1.1   mycroft {
    813       1.33  augustss 	struct fs *fs;
    814       1.33  augustss 	int cg;
    815        1.1   mycroft 	int avgbfree, startcg;
    816        1.1   mycroft 
    817      1.101        ad 	KASSERT(mutex_owned(&ip->i_ump->um_lock));
    818      1.101        ad 
    819        1.1   mycroft 	fs = ip->i_fs;
    820      1.111    simonb 
    821      1.111    simonb 	/*
    822      1.111    simonb 	 * If allocating a contiguous file with B_CONTIG, use the hints
     823      1.111    simonb 	 * in the inode extensions to return the desired block.
    824      1.111    simonb 	 *
    825      1.111    simonb 	 * For metadata (indirect blocks) return the address of where
    826      1.111    simonb 	 * the first indirect block resides - we'll scan for the next
    827      1.111    simonb 	 * available slot if we need to allocate more than one indirect
    828      1.111    simonb 	 * block.  For data, return the address of the actual block
    829      1.111    simonb 	 * relative to the address of the first data block.
    830      1.111    simonb 	 */
    831      1.111    simonb 	if (flags & B_CONTIG) {
    832      1.111    simonb 		KASSERT(ip->i_ffs_first_data_blk != 0);
    833      1.111    simonb 		KASSERT(ip->i_ffs_first_indir_blk != 0);
    834      1.111    simonb 		if (flags & B_METAONLY)
    835      1.111    simonb 			return ip->i_ffs_first_indir_blk;
    836      1.111    simonb 		else
    837      1.138  dholland 			return ip->i_ffs_first_data_blk + ffs_blkstofrags(fs, lbn);
    838      1.111    simonb 	}
    839      1.111    simonb 
    840        1.1   mycroft 	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
    841      1.134  dholland 		if (lbn < UFS_NDADDR + FFS_NINDIR(fs)) {
    842        1.1   mycroft 			cg = ino_to_cg(fs, ip->i_number);
    843      1.110    simonb 			return (cgbase(fs, cg) + fs->fs_frag);
    844        1.1   mycroft 		}
    845        1.1   mycroft 		/*
    846        1.1   mycroft 		 * Find a cylinder with greater than average number of
    847        1.1   mycroft 		 * unused data blocks.
    848        1.1   mycroft 		 */
    849        1.1   mycroft 		if (indx == 0 || bap[indx - 1] == 0)
    850        1.1   mycroft 			startcg =
    851        1.1   mycroft 			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
    852        1.1   mycroft 		else
    853       1.19    bouyer 			startcg = dtog(fs,
    854       1.30      fvdl 				ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
    855        1.1   mycroft 		startcg %= fs->fs_ncg;
    856        1.1   mycroft 		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
    857        1.1   mycroft 		for (cg = startcg; cg < fs->fs_ncg; cg++)
    858        1.1   mycroft 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
    859      1.110    simonb 				return (cgbase(fs, cg) + fs->fs_frag);
    860        1.1   mycroft 			}
    861       1.52     lukem 		for (cg = 0; cg < startcg; cg++)
    862        1.1   mycroft 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
    863      1.110    simonb 				return (cgbase(fs, cg) + fs->fs_frag);
    864        1.1   mycroft 			}
    865       1.35   thorpej 		return (0);
    866        1.1   mycroft 	}
    867        1.1   mycroft 	/*
    868       1.60      fvdl 	 * We just always try to lay things out contiguously.
    869       1.60      fvdl 	 */
    870       1.60      fvdl 	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
    871       1.60      fvdl }
    872       1.60      fvdl 
    873       1.60      fvdl daddr_t
    874      1.111    simonb ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int flags,
    875      1.111    simonb     int64_t *bap)
    876       1.60      fvdl {
    877       1.60      fvdl 	struct fs *fs;
    878       1.60      fvdl 	int cg;
    879       1.60      fvdl 	int avgbfree, startcg;
    880       1.60      fvdl 
    881      1.101        ad 	KASSERT(mutex_owned(&ip->i_ump->um_lock));
    882      1.101        ad 
    883       1.60      fvdl 	fs = ip->i_fs;
    884      1.111    simonb 
    885      1.111    simonb 	/*
    886      1.111    simonb 	 * If allocating a contiguous file with B_CONTIG, use the hints
     887      1.111    simonb 	 * in the inode extensions to return the desired block.
    888      1.111    simonb 	 *
    889      1.111    simonb 	 * For metadata (indirect blocks) return the address of where
    890      1.111    simonb 	 * the first indirect block resides - we'll scan for the next
    891      1.111    simonb 	 * available slot if we need to allocate more than one indirect
    892      1.111    simonb 	 * block.  For data, return the address of the actual block
    893      1.111    simonb 	 * relative to the address of the first data block.
    894      1.111    simonb 	 */
    895      1.111    simonb 	if (flags & B_CONTIG) {
    896      1.111    simonb 		KASSERT(ip->i_ffs_first_data_blk != 0);
    897      1.111    simonb 		KASSERT(ip->i_ffs_first_indir_blk != 0);
    898      1.111    simonb 		if (flags & B_METAONLY)
    899      1.111    simonb 			return ip->i_ffs_first_indir_blk;
    900      1.111    simonb 		else
    901      1.138  dholland 			return ip->i_ffs_first_data_blk + ffs_blkstofrags(fs, lbn);
    902      1.111    simonb 	}
    903      1.111    simonb 
    904       1.60      fvdl 	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
    905      1.134  dholland 		if (lbn < UFS_NDADDR + FFS_NINDIR(fs)) {
    906       1.60      fvdl 			cg = ino_to_cg(fs, ip->i_number);
    907      1.110    simonb 			return (cgbase(fs, cg) + fs->fs_frag);
    908       1.60      fvdl 		}
    909        1.1   mycroft 		/*
    910       1.60      fvdl 		 * Find a cylinder with greater than average number of
    911       1.60      fvdl 		 * unused data blocks.
    912        1.1   mycroft 		 */
    913       1.60      fvdl 		if (indx == 0 || bap[indx - 1] == 0)
    914       1.60      fvdl 			startcg =
    915       1.60      fvdl 			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
    916       1.60      fvdl 		else
    917       1.60      fvdl 			startcg = dtog(fs,
    918       1.60      fvdl 				ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
    919       1.60      fvdl 		startcg %= fs->fs_ncg;
    920       1.60      fvdl 		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
    921       1.60      fvdl 		for (cg = startcg; cg < fs->fs_ncg; cg++)
    922       1.60      fvdl 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
    923      1.110    simonb 				return (cgbase(fs, cg) + fs->fs_frag);
    924       1.60      fvdl 			}
    925       1.60      fvdl 		for (cg = 0; cg < startcg; cg++)
    926       1.60      fvdl 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
    927      1.110    simonb 				return (cgbase(fs, cg) + fs->fs_frag);
    928       1.60      fvdl 			}
    929       1.60      fvdl 		return (0);
    930       1.60      fvdl 	}
    931       1.60      fvdl 	/*
    932       1.60      fvdl 	 * We just always try to lay things out contiguously.
    933       1.60      fvdl 	 */
    934       1.60      fvdl 	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
    935        1.1   mycroft }
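
/*
 * Worked example of the section policy implemented above (illustrative
 * numbers only): with fs_maxbpg = 2048, a file starting a new section at
 * lbn 10240 with no previous block to extend gets
 * startcg = ino_to_cg(fs, ip->i_number) + 10240 / 2048, i.e. the sweep
 * for an emptier-than-average cylinder group begins five groups past the
 * inode's own group (modulo fs_ncg).
 */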
    936        1.1   mycroft 
    937       1.60      fvdl 
    938        1.1   mycroft /*
    939        1.1   mycroft  * Implement the cylinder overflow algorithm.
    940        1.1   mycroft  *
    941        1.1   mycroft  * The policy implemented by this algorithm is:
    942        1.1   mycroft  *   1) allocate the block in its requested cylinder group.
    943        1.1   mycroft  *   2) quadratically rehash on the cylinder group number.
    944        1.1   mycroft  *   3) brute force search for a free block.
    945      1.106     pooka  *
    946      1.106     pooka  * => called with um_lock held
    947      1.106     pooka  * => returns with um_lock released on success, held on failure
    948      1.106     pooka  *    (*allocator releases lock on success, retains lock on failure)
    949        1.1   mycroft  */
    950        1.1   mycroft /*VARARGS5*/
    951       1.58      fvdl static daddr_t
    952       1.85   thorpej ffs_hashalloc(struct inode *ip, int cg, daddr_t pref,
    953       1.85   thorpej     int size /* size for data blocks, mode for inodes */,
    954      1.111    simonb     int flags, daddr_t (*allocator)(struct inode *, int, daddr_t, int, int))
    955        1.1   mycroft {
    956       1.33  augustss 	struct fs *fs;
    957       1.58      fvdl 	daddr_t result;
    958        1.1   mycroft 	int i, icg = cg;
    959        1.1   mycroft 
    960        1.1   mycroft 	fs = ip->i_fs;
    961        1.1   mycroft 	/*
    962        1.1   mycroft 	 * 1: preferred cylinder group
    963        1.1   mycroft 	 */
    964      1.111    simonb 	result = (*allocator)(ip, cg, pref, size, flags);
    965        1.1   mycroft 	if (result)
    966        1.1   mycroft 		return (result);
    967      1.111    simonb 
    968      1.111    simonb 	if (flags & B_CONTIG)
    969      1.111    simonb 		return (result);
    970        1.1   mycroft 	/*
    971        1.1   mycroft 	 * 2: quadratic rehash
    972        1.1   mycroft 	 */
    973        1.1   mycroft 	for (i = 1; i < fs->fs_ncg; i *= 2) {
    974        1.1   mycroft 		cg += i;
    975        1.1   mycroft 		if (cg >= fs->fs_ncg)
    976        1.1   mycroft 			cg -= fs->fs_ncg;
    977      1.111    simonb 		result = (*allocator)(ip, cg, 0, size, flags);
    978        1.1   mycroft 		if (result)
    979        1.1   mycroft 			return (result);
    980        1.1   mycroft 	}
    981        1.1   mycroft 	/*
    982        1.1   mycroft 	 * 3: brute force search
    983        1.1   mycroft 	 * Note that we start at i == 2, since 0 was checked initially,
    984        1.1   mycroft 	 * and 1 is always checked in the quadratic rehash.
    985        1.1   mycroft 	 */
    986        1.1   mycroft 	cg = (icg + 2) % fs->fs_ncg;
    987        1.1   mycroft 	for (i = 2; i < fs->fs_ncg; i++) {
    988      1.111    simonb 		result = (*allocator)(ip, cg, 0, size, flags);
    989        1.1   mycroft 		if (result)
    990        1.1   mycroft 			return (result);
    991        1.1   mycroft 		cg++;
    992        1.1   mycroft 		if (cg == fs->fs_ncg)
    993        1.1   mycroft 			cg = 0;
    994        1.1   mycroft 	}
    995       1.35   thorpej 	return (0);
    996        1.1   mycroft }
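
                             /*
                              * Illustrative note (editor's addition, not part of the original
                              * source): with fs_ncg == 16 and a preferred group icg == 5, the
                              * loops above probe the cylinder groups in this order:
                              *
                              *   5                               (preferred group)
                              *   6, 8, 12, 4                     (quadratic rehash: i = 1, 2,
                              *                                    4, 8, wrapping mod fs_ncg)
                              *   7, 8, ..., 15, 0, 1, ..., 4     (brute force from icg + 2)
                              *
                              * stopping at the first group whose allocator returns a result.
                              * A few groups are visited more than once; that is accepted for
                              * simplicity.
                              */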
    997        1.1   mycroft 
    998        1.1   mycroft /*
    999        1.1   mycroft  * Determine whether a fragment can be extended.
   1000        1.1   mycroft  *
   1001       1.81     perry  * Check to see if the necessary fragments are available, and
   1002        1.1   mycroft  * if they are, allocate them.
   1003      1.106     pooka  *
   1004      1.106     pooka  * => called with um_lock held
   1005      1.106     pooka  * => returns with um_lock released on success, held on failure
   1006        1.1   mycroft  */
   1007       1.58      fvdl static daddr_t
   1008       1.85   thorpej ffs_fragextend(struct inode *ip, int cg, daddr_t bprev, int osize, int nsize)
   1009        1.1   mycroft {
   1010      1.101        ad 	struct ufsmount *ump;
   1011       1.33  augustss 	struct fs *fs;
   1012       1.33  augustss 	struct cg *cgp;
   1013        1.1   mycroft 	struct buf *bp;
   1014       1.58      fvdl 	daddr_t bno;
   1015        1.1   mycroft 	int frags, bbase;
   1016        1.1   mycroft 	int i, error;
   1017       1.62      fvdl 	u_int8_t *blksfree;
   1018        1.1   mycroft 
   1019        1.1   mycroft 	fs = ip->i_fs;
   1020      1.101        ad 	ump = ip->i_ump;
   1021      1.101        ad 
   1022      1.101        ad 	KASSERT(mutex_owned(&ump->um_lock));
   1023      1.101        ad 
   1024      1.137  dholland 	if (fs->fs_cs(fs, cg).cs_nffree < ffs_numfrags(fs, nsize - osize))
   1025       1.35   thorpej 		return (0);
   1026      1.137  dholland 	frags = ffs_numfrags(fs, nsize);
   1027      1.138  dholland 	bbase = ffs_fragnum(fs, bprev);
   1028      1.138  dholland 	if (bbase > ffs_fragnum(fs, (bprev + frags - 1))) {
   1029        1.1   mycroft 		/* cannot extend across a block boundary */
   1030       1.35   thorpej 		return (0);
   1031        1.1   mycroft 	}
   1032      1.101        ad 	mutex_exit(&ump->um_lock);
   1033      1.136  dholland 	error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
   1034      1.107   hannken 		(int)fs->fs_cgsize, NOCRED, B_MODIFY, &bp);
   1035      1.101        ad 	if (error)
   1036      1.101        ad 		goto fail;
   1037        1.1   mycroft 	cgp = (struct cg *)bp->b_data;
   1038      1.101        ad 	if (!cg_chkmagic(cgp, UFS_FSNEEDSWAP(fs)))
   1039      1.101        ad 		goto fail;
   1040       1.92    kardel 	cgp->cg_old_time = ufs_rw32(time_second, UFS_FSNEEDSWAP(fs));
   1041       1.73       dbj 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   1042       1.73       dbj 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   1043       1.92    kardel 		cgp->cg_time = ufs_rw64(time_second, UFS_FSNEEDSWAP(fs));
   1044        1.1   mycroft 	bno = dtogd(fs, bprev);
   1045       1.62      fvdl 	blksfree = cg_blksfree(cgp, UFS_FSNEEDSWAP(fs));
   1046      1.137  dholland 	for (i = ffs_numfrags(fs, osize); i < frags; i++)
   1047      1.101        ad 		if (isclr(blksfree, bno + i))
   1048      1.101        ad 			goto fail;
   1049        1.1   mycroft 	/*
   1050        1.1   mycroft 	 * The current fragment can be extended:
   1051        1.1   mycroft 	 * deduct the count on the free fragment being extended into,
   1052        1.1   mycroft 	 * increase the count on the remaining free fragment (if any),
   1053        1.1   mycroft 	 * and allocate the extended piece.
   1054        1.1   mycroft 	 */
   1055        1.1   mycroft 	for (i = frags; i < fs->fs_frag - bbase; i++)
   1056       1.62      fvdl 		if (isclr(blksfree, bno + i))
   1057        1.1   mycroft 			break;
   1058      1.137  dholland 	ufs_add32(cgp->cg_frsum[i - ffs_numfrags(fs, osize)], -1, UFS_FSNEEDSWAP(fs));
   1059        1.1   mycroft 	if (i != frags)
   1060       1.30      fvdl 		ufs_add32(cgp->cg_frsum[i - frags], 1, UFS_FSNEEDSWAP(fs));
   1061      1.101        ad 	mutex_enter(&ump->um_lock);
   1062      1.137  dholland 	for (i = ffs_numfrags(fs, osize); i < frags; i++) {
   1063       1.62      fvdl 		clrbit(blksfree, bno + i);
   1064       1.30      fvdl 		ufs_add32(cgp->cg_cs.cs_nffree, -1, UFS_FSNEEDSWAP(fs));
   1065        1.1   mycroft 		fs->fs_cstotal.cs_nffree--;
   1066        1.1   mycroft 		fs->fs_cs(fs, cg).cs_nffree--;
   1067        1.1   mycroft 	}
   1068        1.1   mycroft 	fs->fs_fmod = 1;
   1069      1.101        ad 	ACTIVECG_CLR(fs, cg);
   1070      1.101        ad 	mutex_exit(&ump->um_lock);
   1071        1.1   mycroft 	bdwrite(bp);
   1072        1.1   mycroft 	return (bprev);
   1073      1.101        ad 
   1074      1.101        ad  fail:
   1075      1.132   hannken  	if (bp != NULL)
   1076      1.132   hannken 		brelse(bp, 0);
   1077      1.101        ad  	mutex_enter(&ump->um_lock);
   1078      1.101        ad  	return (0);
   1079        1.1   mycroft }
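
                             /*
                              * Illustrative note (editor's addition, not part of the original
                              * source): assuming fs_frag == 8, extending a 2-fragment piece at
                              * the front of a block to 3 fragments only requires the third
                              * fragment of that block to be free.  ffs_fragextend() above checks
                              * exactly the fragments between the old and the new size, then
                              * moves the cg_frsum count from the free run it ate into to the
                              * (possibly empty) run that is left over.
                              */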
   1080        1.1   mycroft 
   1081        1.1   mycroft /*
   1082        1.1   mycroft  * Determine whether a block can be allocated.
   1083        1.1   mycroft  *
   1084        1.1   mycroft  * Check to see if a block of the appropriate size is available,
   1085        1.1   mycroft  * and if it is, allocate it.
   1086        1.1   mycroft  */
   1087       1.58      fvdl static daddr_t
   1088      1.111    simonb ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size, int flags)
   1089        1.1   mycroft {
   1090      1.101        ad 	struct ufsmount *ump;
   1091       1.62      fvdl 	struct fs *fs = ip->i_fs;
   1092       1.30      fvdl 	struct cg *cgp;
   1093        1.1   mycroft 	struct buf *bp;
   1094       1.60      fvdl 	int32_t bno;
   1095       1.60      fvdl 	daddr_t blkno;
   1096       1.30      fvdl 	int error, frags, allocsiz, i;
   1097       1.62      fvdl 	u_int8_t *blksfree;
   1098       1.30      fvdl 	const int needswap = UFS_FSNEEDSWAP(fs);
   1099        1.1   mycroft 
   1100      1.101        ad 	ump = ip->i_ump;
   1101      1.101        ad 
   1102      1.101        ad 	KASSERT(mutex_owned(&ump->um_lock));
   1103      1.101        ad 
   1104        1.1   mycroft 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
   1105       1.35   thorpej 		return (0);
   1106      1.101        ad 	mutex_exit(&ump->um_lock);
   1107      1.136  dholland 	error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
   1108      1.107   hannken 		(int)fs->fs_cgsize, NOCRED, B_MODIFY, &bp);
   1109      1.101        ad 	if (error)
   1110      1.101        ad 		goto fail;
   1111        1.1   mycroft 	cgp = (struct cg *)bp->b_data;
   1112       1.19    bouyer 	if (!cg_chkmagic(cgp, needswap) ||
   1113      1.101        ad 	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
   1114      1.101        ad 		goto fail;
   1115       1.92    kardel 	cgp->cg_old_time = ufs_rw32(time_second, needswap);
   1116       1.73       dbj 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   1117       1.73       dbj 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   1118       1.92    kardel 		cgp->cg_time = ufs_rw64(time_second, needswap);
   1119        1.1   mycroft 	if (size == fs->fs_bsize) {
   1120      1.101        ad 		mutex_enter(&ump->um_lock);
   1121      1.111    simonb 		blkno = ffs_alloccgblk(ip, bp, bpref, flags);
   1122       1.76   hannken 		ACTIVECG_CLR(fs, cg);
   1123      1.101        ad 		mutex_exit(&ump->um_lock);
   1124        1.1   mycroft 		bdwrite(bp);
   1125       1.60      fvdl 		return (blkno);
   1126        1.1   mycroft 	}
   1127        1.1   mycroft 	/*
   1128        1.1   mycroft 	 * check to see if any fragments are already available
   1129        1.1   mycroft 	 * allocsiz is the size which will be allocated, hacking
   1130        1.1   mycroft 	 * it down to a smaller size if necessary
   1131        1.1   mycroft 	 */
   1132       1.62      fvdl 	blksfree = cg_blksfree(cgp, needswap);
   1133      1.137  dholland 	frags = ffs_numfrags(fs, size);
   1134        1.1   mycroft 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
   1135        1.1   mycroft 		if (cgp->cg_frsum[allocsiz] != 0)
   1136        1.1   mycroft 			break;
   1137        1.1   mycroft 	if (allocsiz == fs->fs_frag) {
   1138        1.1   mycroft 		/*
   1139       1.81     perry 		 * no fragments were available, so a block will be
   1140        1.1   mycroft 		 * allocated, and hacked up
   1141        1.1   mycroft 		 */
   1142      1.101        ad 		if (cgp->cg_cs.cs_nbfree == 0)
   1143      1.101        ad 			goto fail;
   1144      1.101        ad 		mutex_enter(&ump->um_lock);
   1145      1.111    simonb 		blkno = ffs_alloccgblk(ip, bp, bpref, flags);
   1146       1.60      fvdl 		bno = dtogd(fs, blkno);
   1147        1.1   mycroft 		for (i = frags; i < fs->fs_frag; i++)
   1148       1.62      fvdl 			setbit(blksfree, bno + i);
   1149        1.1   mycroft 		i = fs->fs_frag - frags;
   1150       1.19    bouyer 		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
   1151        1.1   mycroft 		fs->fs_cstotal.cs_nffree += i;
   1152       1.30      fvdl 		fs->fs_cs(fs, cg).cs_nffree += i;
   1153        1.1   mycroft 		fs->fs_fmod = 1;
   1154       1.19    bouyer 		ufs_add32(cgp->cg_frsum[i], 1, needswap);
   1155       1.76   hannken 		ACTIVECG_CLR(fs, cg);
   1156      1.101        ad 		mutex_exit(&ump->um_lock);
   1157        1.1   mycroft 		bdwrite(bp);
   1158       1.60      fvdl 		return (blkno);
   1159        1.1   mycroft 	}
   1160       1.30      fvdl 	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
   1161       1.30      fvdl #if 0
   1162       1.30      fvdl 	/*
   1163       1.30      fvdl 	 * XXX fvdl mapsearch will panic, and never return -1
   1164       1.58      fvdl 	 *          also: returning NULL as daddr_t ?
   1165       1.30      fvdl 	 */
   1166      1.101        ad 	if (bno < 0)
   1167      1.101        ad 		goto fail;
   1168       1.30      fvdl #endif
   1169        1.1   mycroft 	for (i = 0; i < frags; i++)
   1170       1.62      fvdl 		clrbit(blksfree, bno + i);
   1171      1.101        ad 	mutex_enter(&ump->um_lock);
   1172       1.19    bouyer 	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
   1173        1.1   mycroft 	fs->fs_cstotal.cs_nffree -= frags;
   1174        1.1   mycroft 	fs->fs_cs(fs, cg).cs_nffree -= frags;
   1175        1.1   mycroft 	fs->fs_fmod = 1;
   1176       1.19    bouyer 	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
   1177        1.1   mycroft 	if (frags != allocsiz)
   1178       1.19    bouyer 		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
   1179      1.123  sborrill 	blkno = cgbase(fs, cg) + bno;
   1180      1.101        ad 	ACTIVECG_CLR(fs, cg);
   1181      1.101        ad 	mutex_exit(&ump->um_lock);
   1182        1.1   mycroft 	bdwrite(bp);
   1183       1.30      fvdl 	return blkno;
   1184      1.101        ad 
   1185      1.101        ad  fail:
   1186      1.132   hannken  	if (bp != NULL)
   1187      1.132   hannken 		brelse(bp, 0);
   1188      1.101        ad  	mutex_enter(&ump->um_lock);
   1189      1.101        ad  	return (0);
   1190        1.1   mycroft }
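
                             /*
                              * Illustrative note (editor's addition, not part of the original
                              * source): cg_frsum[n] counts the free runs of exactly n fragments
                              * in this cylinder group.  For a sub-block request ffs_alloccg()
                              * above looks for the smallest run size >= the request that
                              * actually exists; only if there is none does it carve up a whole
                              * block, crediting the unused tail of that block back as one new
                              * free run.
                              */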
   1191        1.1   mycroft 
   1192        1.1   mycroft /*
   1193        1.1   mycroft  * Allocate a block in a cylinder group.
   1194        1.1   mycroft  *
   1195        1.1   mycroft  * This algorithm implements the following policy:
   1196        1.1   mycroft  *   1) allocate the requested block.
   1197        1.1   mycroft  *   2) allocate a rotationally optimal block in the same cylinder.
   1198        1.1   mycroft  *   3) allocate the next available block on the block rotor for the
   1199        1.1   mycroft  *      specified cylinder group.
   1200        1.1   mycroft  * Note that this routine only allocates fs_bsize blocks; these
   1201        1.1   mycroft  * blocks may be fragmented by the routine that allocates them.
   1202        1.1   mycroft  */
   1203       1.58      fvdl static daddr_t
   1204      1.111    simonb ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref, int flags)
   1205        1.1   mycroft {
   1206       1.62      fvdl 	struct fs *fs = ip->i_fs;
   1207       1.30      fvdl 	struct cg *cgp;
   1208      1.123  sborrill 	int cg;
   1209       1.60      fvdl 	daddr_t blkno;
   1210       1.60      fvdl 	int32_t bno;
   1211       1.60      fvdl 	u_int8_t *blksfree;
   1212       1.30      fvdl 	const int needswap = UFS_FSNEEDSWAP(fs);
   1213        1.1   mycroft 
   1214      1.141    martin 	KASSERT(mutex_owned(&ip->i_ump->um_lock));
   1215      1.101        ad 
   1216       1.30      fvdl 	cgp = (struct cg *)bp->b_data;
   1217       1.60      fvdl 	blksfree = cg_blksfree(cgp, needswap);
   1218       1.30      fvdl 	if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
   1219       1.19    bouyer 		bpref = ufs_rw32(cgp->cg_rotor, needswap);
   1220       1.60      fvdl 	} else {
   1221      1.138  dholland 		bpref = ffs_blknum(fs, bpref);
   1222       1.60      fvdl 		bno = dtogd(fs, bpref);
   1223        1.1   mycroft 		/*
   1224       1.60      fvdl 		 * if the requested block is available, use it
   1225        1.1   mycroft 		 */
   1226      1.138  dholland 		if (ffs_isblock(fs, blksfree, ffs_fragstoblks(fs, bno)))
   1227       1.60      fvdl 			goto gotit;
   1228      1.111    simonb 		/*
   1229      1.111    simonb 		 * if the requested data block isn't available and we are
   1230      1.111    simonb 		 * trying to allocate a contiguous file, return an error.
   1231      1.111    simonb 		 */
   1232      1.111    simonb 		if ((flags & (B_CONTIG | B_METAONLY)) == B_CONTIG)
   1233      1.111    simonb 			return (0);
   1234        1.1   mycroft 	}
   1235      1.111    simonb 
   1236        1.1   mycroft 	/*
   1237       1.60      fvdl 	 * Take the next available block in this cylinder group.
   1238        1.1   mycroft 	 */
   1239       1.30      fvdl 	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
   1240        1.1   mycroft 	if (bno < 0)
   1241       1.35   thorpej 		return (0);
   1242       1.60      fvdl 	cgp->cg_rotor = ufs_rw32(bno, needswap);
   1243        1.1   mycroft gotit:
   1244      1.138  dholland 	blkno = ffs_fragstoblks(fs, bno);
   1245       1.60      fvdl 	ffs_clrblock(fs, blksfree, blkno);
   1246       1.30      fvdl 	ffs_clusteracct(fs, cgp, blkno, -1);
   1247       1.19    bouyer 	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
   1248        1.1   mycroft 	fs->fs_cstotal.cs_nbfree--;
   1249       1.19    bouyer 	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
   1250       1.73       dbj 	if ((fs->fs_magic == FS_UFS1_MAGIC) &&
   1251       1.73       dbj 	    ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)) {
   1252       1.73       dbj 		int cylno;
   1253       1.73       dbj 		cylno = old_cbtocylno(fs, bno);
   1254       1.75       dbj 		KASSERT(cylno >= 0);
   1255       1.75       dbj 		KASSERT(cylno < fs->fs_old_ncyl);
   1256       1.75       dbj 		KASSERT(old_cbtorpos(fs, bno) >= 0);
   1257       1.75       dbj 		KASSERT(fs->fs_old_nrpos == 0 || old_cbtorpos(fs, bno) < fs->fs_old_nrpos);
   1258       1.73       dbj 		ufs_add16(old_cg_blks(fs, cgp, cylno, needswap)[old_cbtorpos(fs, bno)], -1,
   1259       1.73       dbj 		    needswap);
   1260       1.73       dbj 		ufs_add32(old_cg_blktot(cgp, needswap)[cylno], -1, needswap);
   1261       1.73       dbj 	}
   1262        1.1   mycroft 	fs->fs_fmod = 1;
   1263      1.123  sborrill 	cg = ufs_rw32(cgp->cg_cgx, needswap);
   1264      1.123  sborrill 	blkno = cgbase(fs, cg) + bno;
   1265       1.30      fvdl 	return (blkno);
   1266        1.1   mycroft }
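
                             /*
                              * Illustrative note (editor's addition, not part of the original
                              * source): cg_rotor is a per-cylinder-group hint remembering where
                              * the last block search ended.  The preferred block is honoured
                              * only when it is actually free; a B_CONTIG data allocation gives
                              * up rather than settle for a different block, and everything else
                              * falls back to a bitmap search starting at the preference (or at
                              * the rotor when no usable preference was given), after which the
                              * rotor is updated to the block found.
                              */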
   1267        1.1   mycroft 
   1268        1.1   mycroft /*
   1269        1.1   mycroft  * Determine whether an inode can be allocated.
   1270        1.1   mycroft  *
   1271        1.1   mycroft  * Check to see if an inode is available, and if it is,
   1272        1.1   mycroft  * allocate it using the following policy:
   1273        1.1   mycroft  *   1) allocate the requested inode.
   1274        1.1   mycroft  *   2) allocate the next available inode after the requested
   1275        1.1   mycroft  *      inode in the specified cylinder group.
   1276        1.1   mycroft  */
   1277       1.58      fvdl static daddr_t
   1278      1.111    simonb ffs_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode, int flags)
   1279        1.1   mycroft {
   1280      1.101        ad 	struct ufsmount *ump = ip->i_ump;
   1281       1.62      fvdl 	struct fs *fs = ip->i_fs;
   1282       1.33  augustss 	struct cg *cgp;
   1283       1.60      fvdl 	struct buf *bp, *ibp;
   1284       1.60      fvdl 	u_int8_t *inosused;
   1285        1.1   mycroft 	int error, start, len, loc, map, i;
   1286  1.146.2.2    martin 	int32_t initediblk, maxiblk, irotor;
   1287      1.112   hannken 	daddr_t nalloc;
   1288       1.60      fvdl 	struct ufs2_dinode *dp2;
   1289       1.30      fvdl 	const int needswap = UFS_FSNEEDSWAP(fs);
   1290        1.1   mycroft 
   1291      1.101        ad 	KASSERT(mutex_owned(&ump->um_lock));
   1292      1.111    simonb 	UFS_WAPBL_JLOCK_ASSERT(ip->i_ump->um_mountp);
   1293      1.101        ad 
   1294        1.1   mycroft 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
   1295       1.35   thorpej 		return (0);
   1296      1.101        ad 	mutex_exit(&ump->um_lock);
   1297      1.112   hannken 	ibp = NULL;
   1298  1.146.2.2    martin 	if (fs->fs_magic == FS_UFS2_MAGIC) {
   1299  1.146.2.2    martin 		initediblk = -1;
   1300  1.146.2.2    martin 	} else {
   1301  1.146.2.2    martin 		initediblk = fs->fs_ipg;
   1302  1.146.2.2    martin 	}
   1303  1.146.2.2    martin 	maxiblk = initediblk;
   1304  1.146.2.2    martin 
   1305      1.112   hannken retry:
   1306      1.136  dholland 	error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
   1307      1.107   hannken 		(int)fs->fs_cgsize, NOCRED, B_MODIFY, &bp);
   1308      1.101        ad 	if (error)
   1309      1.101        ad 		goto fail;
   1310        1.1   mycroft 	cgp = (struct cg *)bp->b_data;
   1311      1.101        ad 	if (!cg_chkmagic(cgp, needswap) || cgp->cg_cs.cs_nifree == 0)
   1312      1.101        ad 		goto fail;
   1313      1.112   hannken 
   1314      1.112   hannken 	if (ibp != NULL &&
   1315      1.112   hannken 	    initediblk != ufs_rw32(cgp->cg_initediblk, needswap)) {
   1316      1.112   hannken 		/* Another thread allocated more inodes so we retry the test. */
   1317      1.121        ad 		brelse(ibp, 0);
   1318      1.112   hannken 		ibp = NULL;
   1319      1.112   hannken 	}
   1320      1.112   hannken 	/*
   1321      1.112   hannken 	 * Check to see if we need to initialize more inodes.
   1322      1.112   hannken 	 */
   1323      1.112   hannken 	if (fs->fs_magic == FS_UFS2_MAGIC && ibp == NULL) {
   1324  1.146.2.2    martin 	        initediblk = ufs_rw32(cgp->cg_initediblk, needswap);
   1325  1.146.2.2    martin 		maxiblk = initediblk;
   1326      1.112   hannken 		nalloc = fs->fs_ipg - ufs_rw32(cgp->cg_cs.cs_nifree, needswap);
   1327      1.134  dholland 		if (nalloc + FFS_INOPB(fs) > initediblk &&
   1328      1.112   hannken 		    initediblk < ufs_rw32(cgp->cg_niblk, needswap)) {
   1329      1.112   hannken 			/*
   1330      1.112   hannken 			 * We have to release the cg buffer here to prevent
   1331      1.112   hannken 			 * a deadlock when reading the inode block runs a
   1332      1.112   hannken 			 * copy-on-write that might use this cg.
   1333      1.112   hannken 			 */
   1334      1.112   hannken 			brelse(bp, 0);
   1335      1.112   hannken 			bp = NULL;
   1336      1.136  dholland 			error = ffs_getblk(ip->i_devvp, FFS_FSBTODB(fs,
   1337      1.112   hannken 			    ino_to_fsba(fs, cg * fs->fs_ipg + initediblk)),
   1338      1.112   hannken 			    FFS_NOBLK, fs->fs_bsize, false, &ibp);
   1339      1.112   hannken 			if (error)
   1340      1.112   hannken 				goto fail;
   1341  1.146.2.2    martin 
   1342  1.146.2.2    martin 			maxiblk += FFS_INOPB(fs);
   1343  1.146.2.2    martin 
   1344      1.112   hannken 			goto retry;
   1345      1.112   hannken 		}
   1346      1.112   hannken 	}
   1347      1.112   hannken 
   1348       1.92    kardel 	cgp->cg_old_time = ufs_rw32(time_second, needswap);
   1349       1.73       dbj 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   1350       1.73       dbj 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   1351       1.92    kardel 		cgp->cg_time = ufs_rw64(time_second, needswap);
   1352       1.60      fvdl 	inosused = cg_inosused(cgp, needswap);
   1353  1.146.2.2    martin 
   1354        1.1   mycroft 	if (ipref) {
   1355        1.1   mycroft 		ipref %= fs->fs_ipg;
   1356  1.146.2.2    martin 		/* safeguard to stay within the (to be) initialized inode range */
   1357  1.146.2.2    martin 		if (ipref < maxiblk && isclr(inosused, ipref))
   1358        1.1   mycroft 			goto gotit;
   1359        1.1   mycroft 	}
   1360  1.146.2.2    martin 
   1361  1.146.2.2    martin 	irotor = ufs_rw32(cgp->cg_irotor, needswap);
   1362  1.146.2.2    martin 
   1363  1.146.2.2    martin 	KASSERTMSG(irotor < initediblk, "%s: allocation botch: cg=%d, irotor %d"
   1364  1.146.2.2    martin 		   " out of bounds, initediblk=%d",
   1365  1.146.2.2    martin 		   __func__, cg, irotor, initediblk);
   1366  1.146.2.2    martin 
   1367  1.146.2.2    martin 	start = irotor / NBBY;
   1368  1.146.2.2    martin 	len = howmany(maxiblk - irotor, NBBY);
   1369       1.60      fvdl 	loc = skpc(0xff, len, &inosused[start]);
   1370        1.1   mycroft 	if (loc == 0) {
   1371        1.1   mycroft 		len = start + 1;
   1372        1.1   mycroft 		start = 0;
   1373       1.60      fvdl 		loc = skpc(0xff, len, &inosused[0]);
   1374        1.1   mycroft 		if (loc == 0) {
   1375       1.13  christos 			printf("cg = %d, irotor = %d, fs = %s\n",
   1376       1.19    bouyer 			    cg, ufs_rw32(cgp->cg_irotor, needswap),
   1377       1.19    bouyer 				fs->fs_fsmnt);
   1378        1.1   mycroft 			panic("ffs_nodealloccg: map corrupted");
   1379        1.1   mycroft 			/* NOTREACHED */
   1380        1.1   mycroft 		}
   1381        1.1   mycroft 	}
   1382        1.1   mycroft 	i = start + len - loc;
   1383      1.126     rmind 	map = inosused[i] ^ 0xff;
   1384      1.126     rmind 	if (map == 0) {
   1385      1.126     rmind 		printf("fs = %s\n", fs->fs_fsmnt);
   1386      1.126     rmind 		panic("ffs_nodealloccg: block not in map");
   1387        1.1   mycroft 	}
   1388  1.146.2.2    martin 
   1389      1.126     rmind 	ipref = i * NBBY + ffs(map) - 1;
   1390  1.146.2.2    martin 
   1391      1.126     rmind 	cgp->cg_irotor = ufs_rw32(ipref, needswap);
   1392  1.146.2.2    martin 
   1393        1.1   mycroft gotit:
   1394  1.146.2.2    martin 	KASSERTMSG(ipref < maxiblk, "%s: allocation botch: cg=%d attempt to "
   1395  1.146.2.2    martin 		   "allocate inode index %d beyond max allocated index %d"
   1396  1.146.2.2    martin 		   " of %d inodes/cg",
   1397  1.146.2.2    martin 		   __func__, cg, (int)ipref, maxiblk, cgp->cg_niblk);
   1398  1.146.2.2    martin 
   1399      1.111    simonb 	UFS_WAPBL_REGISTER_INODE(ip->i_ump->um_mountp, cg * fs->fs_ipg + ipref,
   1400      1.111    simonb 	    mode);
   1401       1.60      fvdl 	/*
   1402       1.60      fvdl 	 * Check to see if we need to initialize more inodes.
   1403       1.60      fvdl 	 */
   1404      1.112   hannken 	if (ibp != NULL) {
   1405      1.112   hannken 		KASSERT(initediblk == ufs_rw32(cgp->cg_initediblk, needswap));
   1406      1.108   hannken 		memset(ibp->b_data, 0, fs->fs_bsize);
   1407      1.108   hannken 		dp2 = (struct ufs2_dinode *)(ibp->b_data);
   1408      1.134  dholland 		for (i = 0; i < FFS_INOPB(fs); i++) {
   1409       1.60      fvdl 			/*
   1410       1.60      fvdl 			 * Don't bother to swap, it's supposed to be
   1411       1.60      fvdl 			 * random, after all.
   1412       1.60      fvdl 			 */
   1413      1.130       tls 			dp2->di_gen = (cprng_fast32() & INT32_MAX) / 2 + 1;
   1414       1.60      fvdl 			dp2++;
   1415       1.60      fvdl 		}
   1416      1.134  dholland 		initediblk += FFS_INOPB(fs);
   1417       1.60      fvdl 		cgp->cg_initediblk = ufs_rw32(initediblk, needswap);
   1418       1.60      fvdl 	}
   1419       1.60      fvdl 
   1420      1.101        ad 	mutex_enter(&ump->um_lock);
   1421       1.76   hannken 	ACTIVECG_CLR(fs, cg);
   1422      1.101        ad 	setbit(inosused, ipref);
   1423      1.101        ad 	ufs_add32(cgp->cg_cs.cs_nifree, -1, needswap);
   1424      1.101        ad 	fs->fs_cstotal.cs_nifree--;
   1425      1.101        ad 	fs->fs_cs(fs, cg).cs_nifree--;
   1426      1.101        ad 	fs->fs_fmod = 1;
   1427      1.101        ad 	if ((mode & IFMT) == IFDIR) {
   1428      1.101        ad 		ufs_add32(cgp->cg_cs.cs_ndir, 1, needswap);
   1429      1.101        ad 		fs->fs_cstotal.cs_ndir++;
   1430      1.101        ad 		fs->fs_cs(fs, cg).cs_ndir++;
   1431      1.101        ad 	}
   1432      1.101        ad 	mutex_exit(&ump->um_lock);
   1433      1.112   hannken 	if (ibp != NULL) {
   1434      1.112   hannken 		bwrite(bp);
   1435      1.104   hannken 		bawrite(ibp);
   1436      1.112   hannken 	} else
   1437      1.112   hannken 		bdwrite(bp);
   1438        1.1   mycroft 	return (cg * fs->fs_ipg + ipref);
   1439      1.101        ad  fail:
   1440      1.112   hannken 	if (bp != NULL)
   1441      1.112   hannken 		brelse(bp, 0);
   1442      1.112   hannken 	if (ibp != NULL)
   1443      1.121        ad 		brelse(ibp, 0);
   1444      1.101        ad 	mutex_enter(&ump->um_lock);
   1445      1.101        ad 	return (0);
   1446        1.1   mycroft }
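
                             /*
                              * Illustrative note (editor's addition, not part of the original
                              * source): cg_inosused is a bitmap with one bit per inode in the
                              * group and cg_irotor remembers where the previous search left off.
                              * The scan above first skips fully used bytes (0xff) starting at
                              * the rotor, wrapping around once, then takes the first clear bit
                              * in the first partially free byte.  On UFS2 the on-disk inode
                              * blocks are initialized lazily, FFS_INOPB(fs) inodes at a time,
                              * which is why the function may drop the cg buffer, allocate and
                              * zero the next inode block, and retry before committing the
                              * allocation.
                              */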
   1447        1.1   mycroft 
   1448        1.1   mycroft /*
   1449      1.111    simonb  * Allocate a block or fragment.
   1450      1.111    simonb  *
   1451      1.111    simonb  * The specified block or fragment is removed from the
   1452      1.111    simonb  * free map, possibly fragmenting a block in the process.
   1453      1.111    simonb  *
   1454      1.111    simonb  * This implementation should mirror ffs_blkfree
   1455      1.111    simonb  *
   1456      1.111    simonb  * => um_lock not held on entry or exit
   1457      1.111    simonb  */
   1458      1.111    simonb int
   1459      1.111    simonb ffs_blkalloc(struct inode *ip, daddr_t bno, long size)
   1460      1.111    simonb {
   1461      1.116     joerg 	int error;
   1462      1.111    simonb 
   1463      1.116     joerg 	error = ffs_check_bad_allocation(__func__, ip->i_fs, bno, size,
   1464      1.116     joerg 	    ip->i_dev, ip->i_uid);
   1465      1.116     joerg 	if (error)
   1466      1.116     joerg 		return error;
   1467      1.115     joerg 
   1468      1.115     joerg 	return ffs_blkalloc_ump(ip->i_ump, bno, size);
   1469      1.115     joerg }
   1470      1.115     joerg 
   1471      1.115     joerg int
   1472      1.115     joerg ffs_blkalloc_ump(struct ufsmount *ump, daddr_t bno, long size)
   1473      1.115     joerg {
   1474      1.115     joerg 	struct fs *fs = ump->um_fs;
   1475      1.115     joerg 	struct cg *cgp;
   1476      1.115     joerg 	struct buf *bp;
   1477      1.115     joerg 	int32_t fragno, cgbno;
   1478      1.115     joerg 	int i, error, cg, blk, frags, bbase;
   1479      1.115     joerg 	u_int8_t *blksfree;
   1480      1.115     joerg 	const int needswap = UFS_FSNEEDSWAP(fs);
   1481      1.115     joerg 
   1482      1.134  dholland 	KASSERT((u_int)size <= fs->fs_bsize && ffs_fragoff(fs, size) == 0 &&
   1483      1.138  dholland 	    ffs_fragnum(fs, bno) + ffs_numfrags(fs, size) <= fs->fs_frag);
   1484      1.115     joerg 	KASSERT(bno < fs->fs_size);
   1485      1.115     joerg 
   1486      1.115     joerg 	cg = dtog(fs, bno);
   1487      1.136  dholland 	error = bread(ump->um_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
   1488      1.111    simonb 		(int)fs->fs_cgsize, NOCRED, B_MODIFY, &bp);
   1489      1.111    simonb 	if (error) {
   1490      1.111    simonb 		return error;
   1491      1.111    simonb 	}
   1492      1.111    simonb 	cgp = (struct cg *)bp->b_data;
   1493      1.111    simonb 	if (!cg_chkmagic(cgp, needswap)) {
   1494      1.111    simonb 		brelse(bp, 0);
   1495      1.111    simonb 		return EIO;
   1496      1.111    simonb 	}
   1497      1.111    simonb 	cgp->cg_old_time = ufs_rw32(time_second, needswap);
   1498      1.111    simonb 	cgp->cg_time = ufs_rw64(time_second, needswap);
   1499      1.111    simonb 	cgbno = dtogd(fs, bno);
   1500      1.111    simonb 	blksfree = cg_blksfree(cgp, needswap);
   1501      1.111    simonb 
   1502      1.111    simonb 	mutex_enter(&ump->um_lock);
   1503      1.111    simonb 	if (size == fs->fs_bsize) {
   1504      1.138  dholland 		fragno = ffs_fragstoblks(fs, cgbno);
   1505      1.111    simonb 		if (!ffs_isblock(fs, blksfree, fragno)) {
   1506      1.111    simonb 			mutex_exit(&ump->um_lock);
   1507      1.111    simonb 			brelse(bp, 0);
   1508      1.111    simonb 			return EBUSY;
   1509      1.111    simonb 		}
   1510      1.111    simonb 		ffs_clrblock(fs, blksfree, fragno);
   1511      1.111    simonb 		ffs_clusteracct(fs, cgp, fragno, -1);
   1512      1.111    simonb 		ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
   1513      1.111    simonb 		fs->fs_cstotal.cs_nbfree--;
   1514      1.111    simonb 		fs->fs_cs(fs, cg).cs_nbfree--;
   1515      1.111    simonb 	} else {
   1516      1.138  dholland 		bbase = cgbno - ffs_fragnum(fs, cgbno);
   1517      1.111    simonb 
   1518      1.137  dholland 		frags = ffs_numfrags(fs, size);
   1519      1.111    simonb 		for (i = 0; i < frags; i++) {
   1520      1.111    simonb 			if (isclr(blksfree, cgbno + i)) {
   1521      1.111    simonb 				mutex_exit(&ump->um_lock);
   1522      1.111    simonb 				brelse(bp, 0);
   1523      1.111    simonb 				return EBUSY;
   1524      1.111    simonb 			}
   1525      1.111    simonb 		}
   1526      1.111    simonb 		/*
   1527      1.111    simonb 		 * if a complete block is being split, account for it
   1528      1.111    simonb 		 */
   1529      1.138  dholland 		fragno = ffs_fragstoblks(fs, bbase);
   1530      1.111    simonb 		if (ffs_isblock(fs, blksfree, fragno)) {
   1531      1.111    simonb 			ufs_add32(cgp->cg_cs.cs_nffree, fs->fs_frag, needswap);
   1532      1.111    simonb 			fs->fs_cstotal.cs_nffree += fs->fs_frag;
   1533      1.111    simonb 			fs->fs_cs(fs, cg).cs_nffree += fs->fs_frag;
   1534      1.111    simonb 			ffs_clusteracct(fs, cgp, fragno, -1);
   1535      1.111    simonb 			ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
   1536      1.111    simonb 			fs->fs_cstotal.cs_nbfree--;
   1537      1.111    simonb 			fs->fs_cs(fs, cg).cs_nbfree--;
   1538      1.111    simonb 		}
   1539      1.111    simonb 		/*
   1540      1.111    simonb 		 * decrement the counts associated with the old frags
   1541      1.111    simonb 		 */
   1542      1.111    simonb 		blk = blkmap(fs, blksfree, bbase);
   1543      1.111    simonb 		ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
   1544      1.111    simonb 		/*
   1545      1.111    simonb 		 * allocate the fragment
   1546      1.111    simonb 		 */
   1547      1.111    simonb 		for (i = 0; i < frags; i++) {
   1548      1.111    simonb 			clrbit(blksfree, cgbno + i);
   1549      1.111    simonb 		}
   1550      1.111    simonb 		ufs_add32(cgp->cg_cs.cs_nffree, -i, needswap);
   1551      1.111    simonb 		fs->fs_cstotal.cs_nffree -= i;
   1552      1.111    simonb 		fs->fs_cs(fs, cg).cs_nffree -= i;
   1553      1.111    simonb 		/*
   1554      1.111    simonb 		 * add back in counts associated with the new frags
   1555      1.111    simonb 		 */
   1556      1.111    simonb 		blk = blkmap(fs, blksfree, bbase);
   1557      1.111    simonb 		ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
   1558      1.111    simonb 	}
   1559      1.111    simonb 	fs->fs_fmod = 1;
   1560      1.111    simonb 	ACTIVECG_CLR(fs, cg);
   1561      1.111    simonb 	mutex_exit(&ump->um_lock);
   1562      1.111    simonb 	bdwrite(bp);
   1563      1.111    simonb 	return 0;
   1564      1.111    simonb }
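
                             /*
                              * Illustrative note (editor's addition, not part of the original
                              * source): when ffs_blkalloc_ump() above claims only part of a
                              * previously whole free block, the accounting first converts the
                              * block into fs_frag free fragments (cs_nbfree--, cs_nffree +=
                              * fs_frag) and then debits the fragments actually taken, keeping
                              * the summary counters the exact mirror image of what the freeing
                              * path does.
                              */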
   1565      1.111    simonb 
   1566      1.111    simonb /*
   1567        1.1   mycroft  * Free a block or fragment.
   1568        1.1   mycroft  *
   1569        1.1   mycroft  * The specified block or fragment is placed back in the
   1570       1.81     perry  * free map. If a fragment is deallocated, a possible
   1571        1.1   mycroft  * block reassembly is checked.
   1572      1.106     pooka  *
   1573      1.106     pooka  * => um_lock not held on entry or exit
   1574        1.1   mycroft  */
   1575      1.131  drochner static void
   1576      1.131  drochner ffs_blkfree_cg(struct fs *fs, struct vnode *devvp, daddr_t bno, long size)
   1577        1.1   mycroft {
   1578       1.33  augustss 	struct cg *cgp;
   1579        1.1   mycroft 	struct buf *bp;
   1580       1.76   hannken 	struct ufsmount *ump;
   1581       1.76   hannken 	daddr_t cgblkno;
   1582      1.116     joerg 	int error, cg;
   1583       1.76   hannken 	dev_t dev;
   1584      1.113   hannken 	const bool devvp_is_snapshot = (devvp->v_type != VBLK);
   1585      1.118     joerg 	const int needswap = UFS_FSNEEDSWAP(fs);
   1586        1.1   mycroft 
   1587      1.116     joerg 	KASSERT(!devvp_is_snapshot);
   1588      1.116     joerg 
   1589       1.76   hannken 	cg = dtog(fs, bno);
   1590      1.116     joerg 	dev = devvp->v_rdev;
   1591      1.140   hannken 	ump = VFSTOUFS(spec_node_getmountedfs(devvp));
   1592      1.119     joerg 	KASSERT(fs == ump->um_fs);
   1593      1.136  dholland 	cgblkno = FFS_FSBTODB(fs, cgtod(fs, cg));
   1594      1.116     joerg 
   1595      1.116     joerg 	error = bread(devvp, cgblkno, (int)fs->fs_cgsize,
   1596      1.116     joerg 	    NOCRED, B_MODIFY, &bp);
   1597      1.116     joerg 	if (error) {
   1598      1.116     joerg 		return;
   1599       1.76   hannken 	}
   1600      1.116     joerg 	cgp = (struct cg *)bp->b_data;
   1601      1.116     joerg 	if (!cg_chkmagic(cgp, needswap)) {
   1602      1.116     joerg 		brelse(bp, 0);
   1603      1.116     joerg 		return;
   1604        1.1   mycroft 	}
   1605       1.76   hannken 
   1606      1.119     joerg 	ffs_blkfree_common(ump, fs, dev, bp, bno, size, devvp_is_snapshot);
   1607      1.119     joerg 
   1608      1.119     joerg 	bdwrite(bp);
   1609      1.116     joerg }
   1610      1.116     joerg 
   1611      1.131  drochner struct discardopdata {
   1612      1.131  drochner 	struct work wk; /* must be first */
   1613      1.131  drochner 	struct vnode *devvp;
   1614      1.131  drochner 	daddr_t bno;
   1615      1.131  drochner 	long size;
   1616      1.131  drochner };
   1617      1.131  drochner 
   1618      1.131  drochner struct discarddata {
   1619      1.131  drochner 	struct fs *fs;
   1620      1.131  drochner 	struct discardopdata *entry;
   1621      1.131  drochner 	long maxsize;
   1622      1.131  drochner 	kmutex_t entrylk;
   1623      1.131  drochner 	struct workqueue *wq;
   1624      1.131  drochner 	int wqcnt, wqdraining;
   1625      1.131  drochner 	kmutex_t wqlk;
   1626      1.131  drochner 	kcondvar_t wqcv;
   1627      1.131  drochner 	/* timer for flush? */
   1628      1.131  drochner };
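
                             /*
                              * Illustrative note (editor's addition, not part of the original
                              * source): the two structures above implement deferred TRIM.  With
                              * discard enabled, ffs_blkfree() does not return blocks to the
                              * free map immediately; it keeps one pending extent in ts->entry,
                              * growing it while successive frees prepend to it (FFS tends to
                              * free backwards), up to ts->maxsize bytes.  Full or non-mergeable
                              * extents go to the "trimwq" workqueue, whose callback issues the
                              * discard to the device and only afterwards marks the blocks free
                              * on disk via ffs_blkfree_td(), so a block is never reused before
                              * it has been trimmed.
                              */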
   1629      1.131  drochner 
   1630      1.131  drochner static void
   1631      1.131  drochner ffs_blkfree_td(struct fs *fs, struct discardopdata *td)
   1632      1.131  drochner {
   1633  1.146.2.1   msaitoh 	struct mount *mp = spec_node_getmountedfs(td->devvp);
   1634      1.131  drochner 	long todo;
   1635  1.146.2.1   msaitoh 	int error;
   1636      1.131  drochner 
   1637      1.131  drochner 	while (td->size) {
   1638      1.131  drochner 		todo = min(td->size,
   1639      1.138  dholland 		  ffs_lfragtosize(fs, (fs->fs_frag - ffs_fragnum(fs, td->bno))));
   1640  1.146.2.1   msaitoh 		error = UFS_WAPBL_BEGIN(mp);
   1641  1.146.2.1   msaitoh 		if (error) {
   1642  1.146.2.1   msaitoh 			printf("ffs: failed to begin wapbl transaction"
   1643  1.146.2.1   msaitoh 			    " for discard: %d\n", error);
   1644  1.146.2.1   msaitoh 			break;
   1645  1.146.2.1   msaitoh 		}
   1646      1.131  drochner 		ffs_blkfree_cg(fs, td->devvp, td->bno, todo);
   1647  1.146.2.1   msaitoh 		UFS_WAPBL_END(mp);
   1648      1.137  dholland 		td->bno += ffs_numfrags(fs, todo);
   1649      1.131  drochner 		td->size -= todo;
   1650      1.131  drochner 	}
   1651      1.131  drochner }
   1652      1.131  drochner 
   1653      1.131  drochner static void
   1654      1.131  drochner ffs_discardcb(struct work *wk, void *arg)
   1655      1.131  drochner {
   1656      1.131  drochner 	struct discardopdata *td = (void *)wk;
   1657      1.131  drochner 	struct discarddata *ts = arg;
   1658      1.131  drochner 	struct fs *fs = ts->fs;
   1659      1.146  dholland 	off_t start, len;
   1660      1.139    martin #ifdef TRIMDEBUG
   1661      1.131  drochner 	int error;
   1662      1.139    martin #endif
   1663      1.131  drochner 
   1664      1.146  dholland /* like FSBTODB but emits bytes; XXX move to fs.h */
   1665      1.146  dholland #ifndef FFS_FSBTOBYTES
   1666      1.146  dholland #define FFS_FSBTOBYTES(fs, b) ((b) << (fs)->fs_fshift)
   1667      1.146  dholland #endif
   1668      1.146  dholland 
   1669      1.146  dholland 	start = FFS_FSBTOBYTES(fs, td->bno);
   1670      1.146  dholland 	len = td->size;
   1671      1.139    martin #ifdef TRIMDEBUG
   1672      1.139    martin 	error =
   1673      1.139    martin #endif
   1674      1.146  dholland 		VOP_FDISCARD(td->devvp, start, len);
   1675      1.131  drochner #ifdef TRIMDEBUG
   1676      1.131  drochner 	printf("trim(%" PRId64 ",%ld):%d\n", td->bno, td->size, error);
   1677      1.131  drochner #endif
   1678      1.131  drochner 
   1679      1.131  drochner 	ffs_blkfree_td(fs, td);
   1680      1.131  drochner 	kmem_free(td, sizeof(*td));
   1681      1.131  drochner 	mutex_enter(&ts->wqlk);
   1682      1.131  drochner 	ts->wqcnt--;
   1683      1.131  drochner 	if (ts->wqdraining && !ts->wqcnt)
   1684      1.131  drochner 		cv_signal(&ts->wqcv);
   1685      1.131  drochner 	mutex_exit(&ts->wqlk);
   1686      1.131  drochner }
   1687      1.131  drochner 
   1688      1.131  drochner void *
   1689      1.131  drochner ffs_discard_init(struct vnode *devvp, struct fs *fs)
   1690      1.131  drochner {
   1691      1.131  drochner 	struct discarddata *ts;
   1692      1.131  drochner 	int error;
   1693      1.131  drochner 
   1694      1.131  drochner 	ts = kmem_zalloc(sizeof (*ts), KM_SLEEP);
   1695      1.131  drochner 	error = workqueue_create(&ts->wq, "trimwq", ffs_discardcb, ts,
   1696      1.131  drochner 				 0, 0, 0);
   1697      1.131  drochner 	if (error) {
   1698      1.131  drochner 		kmem_free(ts, sizeof (*ts));
   1699      1.131  drochner 		return NULL;
   1700      1.131  drochner 	}
   1701      1.131  drochner 	mutex_init(&ts->entrylk, MUTEX_DEFAULT, IPL_NONE);
   1702      1.131  drochner 	mutex_init(&ts->wqlk, MUTEX_DEFAULT, IPL_NONE);
   1703      1.131  drochner 	cv_init(&ts->wqcv, "trimwqcv");
   1704      1.146  dholland 	ts->maxsize = 100*1024; /* XXX */
   1705      1.131  drochner 	ts->fs = fs;
   1706      1.131  drochner 	return ts;
   1707      1.131  drochner }
   1708      1.131  drochner 
   1709      1.131  drochner void
   1710      1.131  drochner ffs_discard_finish(void *vts, int flags)
   1711      1.131  drochner {
   1712      1.131  drochner 	struct discarddata *ts = vts;
   1713      1.131  drochner 	struct discardopdata *td = NULL;
   1714      1.131  drochner 	int res = 0;
   1715      1.131  drochner 
   1716      1.131  drochner 	/* wait for workqueue to drain */
   1717      1.131  drochner 	mutex_enter(&ts->wqlk);
   1718      1.131  drochner 	if (ts->wqcnt) {
   1719      1.131  drochner 		ts->wqdraining = 1;
   1720      1.131  drochner 		res = cv_timedwait(&ts->wqcv, &ts->wqlk, mstohz(5000));
   1721      1.131  drochner 	}
   1722      1.131  drochner 	mutex_exit(&ts->wqlk);
   1723      1.131  drochner 	if (res)
   1724      1.131  drochner 		printf("ffs_discarddata drain timeout\n");
   1725      1.131  drochner 
   1726      1.131  drochner 	mutex_enter(&ts->entrylk);
   1727      1.131  drochner 	if (ts->entry) {
   1728      1.131  drochner 		td = ts->entry;
   1729      1.131  drochner 		ts->entry = NULL;
   1730      1.131  drochner 	}
   1731      1.131  drochner 	mutex_exit(&ts->entrylk);
   1732      1.131  drochner 	if (td) {
   1733      1.131  drochner 		/* XXX don't tell the disk, it's optional */
   1734      1.131  drochner 		ffs_blkfree_td(ts->fs, td);
   1735      1.131  drochner #ifdef TRIMDEBUG
   1736      1.131  drochner 		printf("finish(%" PRId64 ",%ld)\n", td->bno, td->size);
   1737      1.131  drochner #endif
   1738      1.131  drochner 		kmem_free(td, sizeof(*td));
   1739      1.131  drochner 	}
   1740      1.131  drochner 
   1741      1.131  drochner 	cv_destroy(&ts->wqcv);
   1742      1.131  drochner 	mutex_destroy(&ts->entrylk);
   1743      1.131  drochner 	mutex_destroy(&ts->wqlk);
   1744      1.131  drochner 	workqueue_destroy(ts->wq);
   1745      1.131  drochner 	kmem_free(ts, sizeof(*ts));
   1746      1.131  drochner }
   1747      1.131  drochner 
   1748      1.131  drochner void
   1749      1.131  drochner ffs_blkfree(struct fs *fs, struct vnode *devvp, daddr_t bno, long size,
   1750      1.131  drochner     ino_t inum)
   1751      1.131  drochner {
   1752      1.131  drochner 	struct ufsmount *ump;
   1753      1.131  drochner 	int error;
   1754      1.131  drochner 	dev_t dev;
   1755      1.131  drochner 	struct discarddata *ts;
   1756      1.131  drochner 	struct discardopdata *td;
   1757      1.131  drochner 
   1758      1.131  drochner 	dev = devvp->v_rdev;
   1759      1.140   hannken 	ump = VFSTOUFS(spec_node_getmountedfs(devvp));
   1760      1.131  drochner 	if (ffs_snapblkfree(fs, devvp, bno, size, inum))
   1761      1.131  drochner 		return;
   1762      1.131  drochner 
   1763      1.131  drochner 	error = ffs_check_bad_allocation(__func__, fs, bno, size, dev, inum);
   1764      1.131  drochner 	if (error)
   1765      1.131  drochner 		return;
   1766      1.131  drochner 
   1767      1.131  drochner 	if (!ump->um_discarddata) {
   1768      1.131  drochner 		ffs_blkfree_cg(fs, devvp, bno, size);
   1769      1.131  drochner 		return;
   1770      1.131  drochner 	}
   1771      1.131  drochner 
   1772      1.131  drochner #ifdef TRIMDEBUG
   1773      1.131  drochner 	printf("blkfree(%" PRId64 ",%ld)\n", bno, size);
   1774      1.131  drochner #endif
   1775      1.131  drochner 	ts = ump->um_discarddata;
   1776      1.131  drochner 	td = NULL;
   1777      1.131  drochner 
   1778      1.131  drochner 	mutex_enter(&ts->entrylk);
   1779      1.131  drochner 	if (ts->entry) {
   1780      1.131  drochner 		td = ts->entry;
   1781      1.131  drochner 		/* ffs deallocs backwards, check for prepend only */
   1782      1.137  dholland 		if (td->bno == bno + ffs_numfrags(fs, size)
   1783      1.131  drochner 		    && td->size + size <= ts->maxsize) {
   1784      1.131  drochner 			td->bno = bno;
   1785      1.131  drochner 			td->size += size;
   1786      1.131  drochner 			if (td->size < ts->maxsize) {
   1787      1.131  drochner #ifdef TRIMDEBUG
   1788      1.131  drochner 				printf("defer(%" PRId64 ",%ld)\n", td->bno, td->size);
   1789      1.131  drochner #endif
   1790      1.131  drochner 				mutex_exit(&ts->entrylk);
   1791      1.131  drochner 				return;
   1792      1.131  drochner 			}
   1793      1.131  drochner 			size = 0; /* mark done */
   1794      1.131  drochner 		}
   1795      1.131  drochner 		ts->entry = NULL;
   1796      1.131  drochner 	}
   1797      1.131  drochner 	mutex_exit(&ts->entrylk);
   1798      1.131  drochner 
   1799      1.131  drochner 	if (td) {
   1800      1.131  drochner #ifdef TRIMDEBUG
   1801      1.131  drochner 		printf("enq old(%" PRId64 ",%ld)\n", td->bno, td->size);
   1802      1.131  drochner #endif
   1803      1.131  drochner 		mutex_enter(&ts->wqlk);
   1804      1.131  drochner 		ts->wqcnt++;
   1805      1.131  drochner 		mutex_exit(&ts->wqlk);
   1806      1.131  drochner 		workqueue_enqueue(ts->wq, &td->wk, NULL);
   1807      1.131  drochner 	}
   1808      1.131  drochner 	if (!size)
   1809      1.131  drochner 		return;
   1810      1.131  drochner 
   1811      1.131  drochner 	td = kmem_alloc(sizeof(*td), KM_SLEEP);
   1812      1.131  drochner 	td->devvp = devvp;
   1813      1.131  drochner 	td->bno = bno;
   1814      1.131  drochner 	td->size = size;
   1815      1.131  drochner 
   1816      1.131  drochner 	if (td->size < ts->maxsize) { /* XXX always the case */
   1817      1.131  drochner 		mutex_enter(&ts->entrylk);
   1818      1.131  drochner 		if (!ts->entry) { /* possible race? */
   1819      1.131  drochner #ifdef TRIMDEBUG
   1820      1.131  drochner 			printf("defer(%" PRId64 ",%ld)\n", td->bno, td->size);
   1821      1.131  drochner #endif
   1822      1.131  drochner 			ts->entry = td;
   1823      1.131  drochner 			td = NULL;
   1824      1.131  drochner 		}
   1825      1.131  drochner 		mutex_exit(&ts->entrylk);
   1826      1.131  drochner 	}
   1827      1.131  drochner 	if (td) {
   1828      1.131  drochner #ifdef TRIMDEBUG
   1829      1.131  drochner 		printf("enq new(%" PRId64 ",%ld)\n", td->bno, td->size);
   1830      1.131  drochner #endif
   1831      1.131  drochner 		mutex_enter(&ts->wqlk);
   1832      1.131  drochner 		ts->wqcnt++;
   1833      1.131  drochner 		mutex_exit(&ts->wqlk);
   1834      1.131  drochner 		workqueue_enqueue(ts->wq, &td->wk, NULL);
   1835      1.131  drochner 	}
   1836      1.131  drochner }
   1837      1.131  drochner 
   1838      1.116     joerg /*
   1839      1.116     joerg  * Free a block or fragment from a snapshot cg copy.
   1840      1.116     joerg  *
   1841      1.116     joerg  * The specified block or fragment is placed back in the
   1842      1.116     joerg  * free map. If a fragment is deallocated, a possible
   1843      1.116     joerg  * block reassembly is checked.
   1844      1.116     joerg  *
   1845      1.116     joerg  * => um_lock not held on entry or exit
   1846      1.116     joerg  */
   1847      1.116     joerg void
   1848      1.116     joerg ffs_blkfree_snap(struct fs *fs, struct vnode *devvp, daddr_t bno, long size,
   1849      1.116     joerg     ino_t inum)
   1850      1.116     joerg {
   1851      1.116     joerg 	struct cg *cgp;
   1852      1.116     joerg 	struct buf *bp;
   1853      1.116     joerg 	struct ufsmount *ump;
   1854      1.116     joerg 	daddr_t cgblkno;
   1855      1.116     joerg 	int error, cg;
   1856      1.116     joerg 	dev_t dev;
   1857      1.116     joerg 	const bool devvp_is_snapshot = (devvp->v_type != VBLK);
   1858      1.118     joerg 	const int needswap = UFS_FSNEEDSWAP(fs);
   1859      1.116     joerg 
   1860      1.116     joerg 	KASSERT(devvp_is_snapshot);
   1861      1.116     joerg 
   1862      1.116     joerg 	cg = dtog(fs, bno);
   1863      1.116     joerg 	dev = VTOI(devvp)->i_devvp->v_rdev;
   1864      1.116     joerg 	ump = VFSTOUFS(devvp->v_mount);
   1865      1.138  dholland 	cgblkno = ffs_fragstoblks(fs, cgtod(fs, cg));
   1866      1.116     joerg 
   1867      1.116     joerg 	error = ffs_check_bad_allocation(__func__, fs, bno, size, dev, inum);
   1868      1.116     joerg 	if (error)
   1869        1.1   mycroft 		return;
   1870      1.116     joerg 
   1871      1.107   hannken 	error = bread(devvp, cgblkno, (int)fs->fs_cgsize,
   1872      1.107   hannken 	    NOCRED, B_MODIFY, &bp);
   1873        1.1   mycroft 	if (error) {
   1874        1.1   mycroft 		return;
   1875        1.1   mycroft 	}
   1876        1.1   mycroft 	cgp = (struct cg *)bp->b_data;
   1877       1.19    bouyer 	if (!cg_chkmagic(cgp, needswap)) {
   1878      1.101        ad 		brelse(bp, 0);
   1879        1.1   mycroft 		return;
   1880        1.1   mycroft 	}
   1881      1.116     joerg 
   1882      1.119     joerg 	ffs_blkfree_common(ump, fs, dev, bp, bno, size, devvp_is_snapshot);
   1883      1.119     joerg 
   1884      1.119     joerg 	bdwrite(bp);
   1885      1.116     joerg }
   1886      1.116     joerg 
   1887      1.116     joerg static void
   1888      1.119     joerg ffs_blkfree_common(struct ufsmount *ump, struct fs *fs, dev_t dev,
   1889      1.119     joerg     struct buf *bp, daddr_t bno, long size, bool devvp_is_snapshot)
   1890      1.116     joerg {
   1891      1.116     joerg 	struct cg *cgp;
   1892      1.116     joerg 	int32_t fragno, cgbno;
   1893      1.116     joerg 	int i, cg, blk, frags, bbase;
   1894      1.116     joerg 	u_int8_t *blksfree;
   1895      1.116     joerg 	const int needswap = UFS_FSNEEDSWAP(fs);
   1896      1.116     joerg 
   1897      1.116     joerg 	cg = dtog(fs, bno);
   1898      1.116     joerg 	cgp = (struct cg *)bp->b_data;
   1899       1.92    kardel 	cgp->cg_old_time = ufs_rw32(time_second, needswap);
   1900       1.73       dbj 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   1901       1.73       dbj 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   1902       1.92    kardel 		cgp->cg_time = ufs_rw64(time_second, needswap);
   1903       1.60      fvdl 	cgbno = dtogd(fs, bno);
   1904       1.62      fvdl 	blksfree = cg_blksfree(cgp, needswap);
   1905      1.101        ad 	mutex_enter(&ump->um_lock);
   1906        1.1   mycroft 	if (size == fs->fs_bsize) {
   1907      1.138  dholland 		fragno = ffs_fragstoblks(fs, cgbno);
   1908       1.62      fvdl 		if (!ffs_isfreeblock(fs, blksfree, fragno)) {
   1909      1.113   hannken 			if (devvp_is_snapshot) {
   1910      1.101        ad 				mutex_exit(&ump->um_lock);
   1911       1.76   hannken 				return;
   1912       1.76   hannken 			}
   1913      1.120  christos 			printf("dev = 0x%llx, block = %" PRId64 ", fs = %s\n",
   1914      1.120  christos 			    (unsigned long long)dev, bno, fs->fs_fsmnt);
   1915        1.1   mycroft 			panic("blkfree: freeing free block");
   1916        1.1   mycroft 		}
   1917       1.62      fvdl 		ffs_setblock(fs, blksfree, fragno);
   1918       1.60      fvdl 		ffs_clusteracct(fs, cgp, fragno, 1);
   1919       1.19    bouyer 		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
   1920        1.1   mycroft 		fs->fs_cstotal.cs_nbfree++;
   1921        1.1   mycroft 		fs->fs_cs(fs, cg).cs_nbfree++;
   1922       1.73       dbj 		if ((fs->fs_magic == FS_UFS1_MAGIC) &&
   1923       1.73       dbj 		    ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)) {
   1924       1.73       dbj 			i = old_cbtocylno(fs, cgbno);
   1925       1.75       dbj 			KASSERT(i >= 0);
   1926       1.75       dbj 			KASSERT(i < fs->fs_old_ncyl);
   1927       1.75       dbj 			KASSERT(old_cbtorpos(fs, cgbno) >= 0);
   1928       1.75       dbj 			KASSERT(fs->fs_old_nrpos == 0 || old_cbtorpos(fs, cgbno) < fs->fs_old_nrpos);
   1929       1.73       dbj 			ufs_add16(old_cg_blks(fs, cgp, i, needswap)[old_cbtorpos(fs, cgbno)], 1,
   1930       1.73       dbj 			    needswap);
   1931       1.73       dbj 			ufs_add32(old_cg_blktot(cgp, needswap)[i], 1, needswap);
   1932       1.73       dbj 		}
   1933        1.1   mycroft 	} else {
   1934      1.138  dholland 		bbase = cgbno - ffs_fragnum(fs, cgbno);
   1935        1.1   mycroft 		/*
   1936        1.1   mycroft 		 * decrement the counts associated with the old frags
   1937        1.1   mycroft 		 */
   1938       1.62      fvdl 		blk = blkmap(fs, blksfree, bbase);
   1939       1.19    bouyer 		ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
   1940        1.1   mycroft 		/*
   1941        1.1   mycroft 		 * deallocate the fragment
   1942        1.1   mycroft 		 */
   1943      1.137  dholland 		frags = ffs_numfrags(fs, size);
   1944        1.1   mycroft 		for (i = 0; i < frags; i++) {
   1945       1.62      fvdl 			if (isset(blksfree, cgbno + i)) {
   1946      1.120  christos 				printf("dev = 0x%llx, block = %" PRId64
   1947       1.59   tsutsui 				       ", fs = %s\n",
   1948      1.120  christos 				    (unsigned long long)dev, bno + i,
   1949      1.120  christos 				    fs->fs_fsmnt);
   1950        1.1   mycroft 				panic("blkfree: freeing free frag");
   1951        1.1   mycroft 			}
   1952       1.62      fvdl 			setbit(blksfree, cgbno + i);
   1953        1.1   mycroft 		}
   1954       1.19    bouyer 		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
   1955        1.1   mycroft 		fs->fs_cstotal.cs_nffree += i;
   1956       1.30      fvdl 		fs->fs_cs(fs, cg).cs_nffree += i;
   1957        1.1   mycroft 		/*
   1958        1.1   mycroft 		 * add back in counts associated with the new frags
   1959        1.1   mycroft 		 */
   1960       1.62      fvdl 		blk = blkmap(fs, blksfree, bbase);
   1961       1.19    bouyer 		ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
   1962        1.1   mycroft 		/*
   1963        1.1   mycroft 		 * if a complete block has been reassembled, account for it
   1964        1.1   mycroft 		 */
   1965      1.138  dholland 		fragno = ffs_fragstoblks(fs, bbase);
   1966       1.62      fvdl 		if (ffs_isblock(fs, blksfree, fragno)) {
   1967       1.19    bouyer 			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
   1968        1.1   mycroft 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
   1969        1.1   mycroft 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
   1970       1.60      fvdl 			ffs_clusteracct(fs, cgp, fragno, 1);
   1971       1.19    bouyer 			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
   1972        1.1   mycroft 			fs->fs_cstotal.cs_nbfree++;
   1973        1.1   mycroft 			fs->fs_cs(fs, cg).cs_nbfree++;
   1974       1.73       dbj 			if ((fs->fs_magic == FS_UFS1_MAGIC) &&
   1975       1.73       dbj 			    ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)) {
   1976       1.73       dbj 				i = old_cbtocylno(fs, bbase);
   1977       1.75       dbj 				KASSERT(i >= 0);
   1978       1.75       dbj 				KASSERT(i < fs->fs_old_ncyl);
   1979       1.75       dbj 				KASSERT(old_cbtorpos(fs, bbase) >= 0);
   1980       1.75       dbj 				KASSERT(fs->fs_old_nrpos == 0 || old_cbtorpos(fs, bbase) < fs->fs_old_nrpos);
   1981       1.73       dbj 				ufs_add16(old_cg_blks(fs, cgp, i, needswap)[old_cbtorpos(fs,
   1982       1.73       dbj 				    bbase)], 1, needswap);
   1983       1.73       dbj 				ufs_add32(old_cg_blktot(cgp, needswap)[i], 1, needswap);
   1984       1.73       dbj 			}
   1985        1.1   mycroft 		}
   1986        1.1   mycroft 	}
   1987        1.1   mycroft 	fs->fs_fmod = 1;
   1988       1.76   hannken 	ACTIVECG_CLR(fs, cg);
   1989      1.101        ad 	mutex_exit(&ump->um_lock);
   1990        1.1   mycroft }
   1991        1.1   mycroft 
   1992        1.1   mycroft /*
   1993        1.1   mycroft  * Free an inode.
   1994       1.30      fvdl  */
   1995       1.30      fvdl int
   1996       1.88      yamt ffs_vfree(struct vnode *vp, ino_t ino, int mode)
   1997       1.30      fvdl {
   1998       1.30      fvdl 
   1999      1.119     joerg 	return ffs_freefile(vp->v_mount, ino, mode);
   2000       1.30      fvdl }
   2001       1.30      fvdl 
   2002       1.30      fvdl /*
   2003       1.30      fvdl  * Do the actual free operation.
   2004        1.1   mycroft  * The specified inode is placed back in the free map.
   2005      1.111    simonb  *
   2006      1.111    simonb  * => um_lock not held on entry or exit
   2007        1.1   mycroft  */
   2008        1.1   mycroft int
   2009      1.119     joerg ffs_freefile(struct mount *mp, ino_t ino, int mode)
   2010      1.119     joerg {
   2011      1.119     joerg 	struct ufsmount *ump = VFSTOUFS(mp);
   2012      1.119     joerg 	struct fs *fs = ump->um_fs;
   2013      1.119     joerg 	struct vnode *devvp;
   2014      1.119     joerg 	struct cg *cgp;
   2015      1.119     joerg 	struct buf *bp;
   2016      1.119     joerg 	int error, cg;
   2017      1.119     joerg 	daddr_t cgbno;
   2018      1.119     joerg 	dev_t dev;
   2019      1.119     joerg 	const int needswap = UFS_FSNEEDSWAP(fs);
   2020      1.119     joerg 
   2021      1.119     joerg 	cg = ino_to_cg(fs, ino);
   2022      1.119     joerg 	devvp = ump->um_devvp;
   2023      1.119     joerg 	dev = devvp->v_rdev;
   2024      1.136  dholland 	cgbno = FFS_FSBTODB(fs, cgtod(fs, cg));
   2025      1.119     joerg 
   2026      1.119     joerg 	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
   2027      1.120  christos 		panic("ifree: range: dev = 0x%llx, ino = %llu, fs = %s",
   2028      1.120  christos 		    (long long)dev, (unsigned long long)ino, fs->fs_fsmnt);
   2029      1.119     joerg 	error = bread(devvp, cgbno, (int)fs->fs_cgsize,
   2030      1.119     joerg 	    NOCRED, B_MODIFY, &bp);
   2031      1.119     joerg 	if (error) {
   2032      1.119     joerg 		return (error);
   2033      1.119     joerg 	}
   2034      1.119     joerg 	cgp = (struct cg *)bp->b_data;
   2035      1.119     joerg 	if (!cg_chkmagic(cgp, needswap)) {
   2036      1.119     joerg 		brelse(bp, 0);
   2037      1.119     joerg 		return (0);
   2038      1.119     joerg 	}
   2039      1.119     joerg 
   2040      1.119     joerg 	ffs_freefile_common(ump, fs, dev, bp, ino, mode, false);
   2041      1.119     joerg 
   2042      1.119     joerg 	bdwrite(bp);
   2043      1.119     joerg 
   2044      1.119     joerg 	return 0;
   2045      1.119     joerg }
   2046      1.119     joerg 
   2047      1.119     joerg int
   2048      1.119     joerg ffs_freefile_snap(struct fs *fs, struct vnode *devvp, ino_t ino, int mode)
   2049        1.9  christos {
   2050      1.101        ad 	struct ufsmount *ump;
   2051       1.33  augustss 	struct cg *cgp;
   2052        1.1   mycroft 	struct buf *bp;
   2053        1.1   mycroft 	int error, cg;
   2054       1.76   hannken 	daddr_t cgbno;
   2055       1.78   hannken 	dev_t dev;
   2056       1.30      fvdl 	const int needswap = UFS_FSNEEDSWAP(fs);
   2057        1.1   mycroft 
   2058      1.119     joerg 	KASSERT(devvp->v_type != VBLK);
   2059      1.111    simonb 
   2060       1.76   hannken 	cg = ino_to_cg(fs, ino);
   2061      1.119     joerg 	dev = VTOI(devvp)->i_devvp->v_rdev;
   2062      1.119     joerg 	ump = VFSTOUFS(devvp->v_mount);
   2063      1.138  dholland 	cgbno = ffs_fragstoblks(fs, cgtod(fs, cg));
   2064        1.1   mycroft 	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
   2065      1.120  christos 		panic("ifree: range: dev = 0x%llx, ino = %llu, fs = %s",
   2066      1.120  christos 		    (unsigned long long)dev, (unsigned long long)ino,
   2067      1.120  christos 		    fs->fs_fsmnt);
   2068      1.107   hannken 	error = bread(devvp, cgbno, (int)fs->fs_cgsize,
   2069      1.107   hannken 	    NOCRED, B_MODIFY, &bp);
   2070        1.1   mycroft 	if (error) {
   2071       1.30      fvdl 		return (error);
   2072        1.1   mycroft 	}
   2073        1.1   mycroft 	cgp = (struct cg *)bp->b_data;
   2074       1.19    bouyer 	if (!cg_chkmagic(cgp, needswap)) {
   2075      1.101        ad 		brelse(bp, 0);
   2076        1.1   mycroft 		return (0);
   2077        1.1   mycroft 	}
   2078      1.119     joerg 	ffs_freefile_common(ump, fs, dev, bp, ino, mode, true);
   2079      1.119     joerg 
   2080      1.119     joerg 	bdwrite(bp);
   2081      1.119     joerg 
   2082      1.119     joerg 	return 0;
   2083      1.119     joerg }
   2084      1.119     joerg 
   2085      1.119     joerg static void
   2086      1.119     joerg ffs_freefile_common(struct ufsmount *ump, struct fs *fs, dev_t dev,
   2087      1.119     joerg     struct buf *bp, ino_t ino, int mode, bool devvp_is_snapshot)
   2088      1.119     joerg {
   2089      1.119     joerg 	int cg;
   2090      1.119     joerg 	struct cg *cgp;
   2091      1.119     joerg 	u_int8_t *inosused;
   2092      1.119     joerg 	const int needswap = UFS_FSNEEDSWAP(fs);
   2093      1.119     joerg 
   2094      1.119     joerg 	cg = ino_to_cg(fs, ino);
   2095      1.119     joerg 	cgp = (struct cg *)bp->b_data;
   2096       1.92    kardel 	cgp->cg_old_time = ufs_rw32(time_second, needswap);
   2097       1.73       dbj 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   2098       1.73       dbj 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   2099       1.92    kardel 		cgp->cg_time = ufs_rw64(time_second, needswap);
   2100       1.62      fvdl 	inosused = cg_inosused(cgp, needswap);
   2101        1.1   mycroft 	ino %= fs->fs_ipg;
   2102       1.62      fvdl 	if (isclr(inosused, ino)) {
   2103      1.120  christos 		printf("ifree: dev = 0x%llx, ino = %llu, fs = %s\n",
   2104      1.120  christos 		    (unsigned long long)dev, (unsigned long long)ino +
   2105      1.120  christos 		    cg * fs->fs_ipg, fs->fs_fsmnt);
   2106        1.1   mycroft 		if (fs->fs_ronly == 0)
   2107        1.1   mycroft 			panic("ifree: freeing free inode");
   2108        1.1   mycroft 	}
   2109       1.62      fvdl 	clrbit(inosused, ino);
   2110      1.113   hannken 	if (!devvp_is_snapshot)
   2111      1.119     joerg 		UFS_WAPBL_UNREGISTER_INODE(ump->um_mountp,
   2112      1.113   hannken 		    ino + cg * fs->fs_ipg, mode);
   2113       1.19    bouyer 	if (ino < ufs_rw32(cgp->cg_irotor, needswap))
   2114       1.19    bouyer 		cgp->cg_irotor = ufs_rw32(ino, needswap);
   2115       1.19    bouyer 	ufs_add32(cgp->cg_cs.cs_nifree, 1, needswap);
   2116      1.101        ad 	mutex_enter(&ump->um_lock);
   2117        1.1   mycroft 	fs->fs_cstotal.cs_nifree++;
   2118        1.1   mycroft 	fs->fs_cs(fs, cg).cs_nifree++;
   2119       1.78   hannken 	if ((mode & IFMT) == IFDIR) {
   2120       1.19    bouyer 		ufs_add32(cgp->cg_cs.cs_ndir, -1, needswap);
   2121        1.1   mycroft 		fs->fs_cstotal.cs_ndir--;
   2122        1.1   mycroft 		fs->fs_cs(fs, cg).cs_ndir--;
   2123        1.1   mycroft 	}
   2124        1.1   mycroft 	fs->fs_fmod = 1;
   2125       1.82   hannken 	ACTIVECG_CLR(fs, cg);
   2126      1.101        ad 	mutex_exit(&ump->um_lock);
   2127        1.1   mycroft }
   2128        1.1   mycroft 
   2129        1.1   mycroft /*
   2130       1.76   hannken  * Check to see if a file is free.
   2131       1.76   hannken  */
   2132       1.76   hannken int
   2133       1.85   thorpej ffs_checkfreefile(struct fs *fs, struct vnode *devvp, ino_t ino)
   2134       1.76   hannken {
   2135       1.76   hannken 	struct cg *cgp;
   2136       1.76   hannken 	struct buf *bp;
   2137       1.76   hannken 	daddr_t cgbno;
   2138       1.76   hannken 	int ret, cg;
   2139       1.76   hannken 	u_int8_t *inosused;
   2140      1.113   hannken 	const bool devvp_is_snapshot = (devvp->v_type != VBLK);
   2141       1.76   hannken 
   2142      1.119     joerg 	KASSERT(devvp_is_snapshot);
   2143      1.119     joerg 
   2144       1.76   hannken 	cg = ino_to_cg(fs, ino);
   2145      1.113   hannken 	if (devvp_is_snapshot)
   2146      1.138  dholland 		cgbno = ffs_fragstoblks(fs, cgtod(fs, cg));
   2147      1.113   hannken 	else
   2148      1.136  dholland 		cgbno = FFS_FSBTODB(fs, cgtod(fs, cg));
   2149       1.76   hannken 	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
   2150       1.76   hannken 		return 1;
   2151      1.107   hannken 	if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, 0, &bp)) {
   2152       1.76   hannken 		return 1;
   2153       1.76   hannken 	}
   2154       1.76   hannken 	cgp = (struct cg *)bp->b_data;
   2155       1.76   hannken 	if (!cg_chkmagic(cgp, UFS_FSNEEDSWAP(fs))) {
   2156      1.101        ad 		brelse(bp, 0);
   2157       1.76   hannken 		return 1;
   2158       1.76   hannken 	}
   2159       1.76   hannken 	inosused = cg_inosused(cgp, UFS_FSNEEDSWAP(fs));
   2160       1.76   hannken 	ino %= fs->fs_ipg;
   2161       1.76   hannken 	ret = isclr(inosused, ino);
   2162      1.101        ad 	brelse(bp, 0);
   2163       1.76   hannken 	return ret;
   2164       1.76   hannken }
   2165       1.76   hannken 
   2166       1.76   hannken /*
   2167        1.1   mycroft  * Find a block of the specified size in the specified cylinder group.
   2168        1.1   mycroft  *
   2169        1.1   mycroft  * It is a panic if a request is made to find a block if none are
   2170        1.1   mycroft  * available.
   2171        1.1   mycroft  */
   2172       1.60      fvdl static int32_t
   2173       1.85   thorpej ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
   2174        1.1   mycroft {
   2175       1.60      fvdl 	int32_t bno;
   2176        1.1   mycroft 	int start, len, loc, i;
   2177        1.1   mycroft 	int blk, field, subfield, pos;
   2178       1.19    bouyer 	int ostart, olen;
   2179       1.62      fvdl 	u_int8_t *blksfree;
   2180       1.30      fvdl 	const int needswap = UFS_FSNEEDSWAP(fs);
   2181        1.1   mycroft 
   2182      1.101        ad 	/* KASSERT(mutex_owned(&ump->um_lock)); */
   2183      1.101        ad 
   2184        1.1   mycroft 	/*
   2185        1.1   mycroft 	 * find the fragment by searching through the free block
   2186        1.1   mycroft 	 * map for an appropriate bit pattern
   2187        1.1   mycroft 	 */
   2188        1.1   mycroft 	if (bpref)
   2189        1.1   mycroft 		start = dtogd(fs, bpref) / NBBY;
   2190        1.1   mycroft 	else
   2191       1.19    bouyer 		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
   2192       1.62      fvdl 	blksfree = cg_blksfree(cgp, needswap);
   2193        1.1   mycroft 	len = howmany(fs->fs_fpg, NBBY) - start;
   2194       1.19    bouyer 	ostart = start;
   2195       1.19    bouyer 	olen = len;
   2196       1.45     lukem 	loc = scanc((u_int)len,
   2197       1.62      fvdl 		(const u_char *)&blksfree[start],
   2198       1.45     lukem 		(const u_char *)fragtbl[fs->fs_frag],
   2199       1.54   mycroft 		(1 << (allocsiz - 1 + (fs->fs_frag & (NBBY - 1)))));
   2200        1.1   mycroft 	if (loc == 0) {
   2201        1.1   mycroft 		len = start + 1;
   2202        1.1   mycroft 		start = 0;
   2203       1.45     lukem 		loc = scanc((u_int)len,
   2204       1.62      fvdl 			(const u_char *)&blksfree[0],
   2205       1.45     lukem 			(const u_char *)fragtbl[fs->fs_frag],
   2206       1.54   mycroft 			(1 << (allocsiz - 1 + (fs->fs_frag & (NBBY - 1)))));
   2207        1.1   mycroft 		if (loc == 0) {
   2208       1.13  christos 			printf("start = %d, len = %d, fs = %s\n",
   2209       1.19    bouyer 			    ostart, olen, fs->fs_fsmnt);
   2210       1.20      ross 			printf("offset=%d %ld\n",
   2211       1.19    bouyer 				ufs_rw32(cgp->cg_freeoff, needswap),
   2212       1.62      fvdl 				(long)blksfree - (long)cgp);
   2213       1.62      fvdl 			printf("cg %d\n", cgp->cg_cgx);
   2214        1.1   mycroft 			panic("ffs_alloccg: map corrupted");
   2215        1.1   mycroft 			/* NOTREACHED */
   2216        1.1   mycroft 		}
   2217        1.1   mycroft 	}
   2218        1.1   mycroft 	bno = (start + len - loc) * NBBY;
   2219       1.19    bouyer 	cgp->cg_frotor = ufs_rw32(bno, needswap);
   2220        1.1   mycroft 	/*
   2221        1.1   mycroft 	 * found the byte in the map
   2222        1.1   mycroft 	 * sift through the bits to find the selected frag
   2223        1.1   mycroft 	 */
   2224        1.1   mycroft 	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
   2225       1.62      fvdl 		blk = blkmap(fs, blksfree, bno);
   2226        1.1   mycroft 		blk <<= 1;
   2227        1.1   mycroft 		field = around[allocsiz];
   2228        1.1   mycroft 		subfield = inside[allocsiz];
   2229        1.1   mycroft 		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
   2230        1.1   mycroft 			if ((blk & field) == subfield)
   2231        1.1   mycroft 				return (bno + pos);
   2232        1.1   mycroft 			field <<= 1;
   2233        1.1   mycroft 			subfield <<= 1;
   2234        1.1   mycroft 		}
   2235        1.1   mycroft 	}
   2236       1.60      fvdl 	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
   2237        1.1   mycroft 	panic("ffs_alloccg: block not in map");
   2238       1.58      fvdl 	/* return (-1); */
   2239        1.1   mycroft }
   2240        1.1   mycroft 
   2241        1.1   mycroft /*
   2242        1.1   mycroft  * Fserr prints the name of a file system with an error diagnostic.
   2243       1.81     perry  *
   2244        1.1   mycroft  * The form of the error message is:
   2245        1.1   mycroft  *	fs: error message
   2246        1.1   mycroft  */
   2247        1.1   mycroft static void
   2248       1.85   thorpej ffs_fserr(struct fs *fs, u_int uid, const char *cp)
   2249        1.1   mycroft {
   2250        1.1   mycroft 
   2251       1.64  gmcgarry 	log(LOG_ERR, "uid %d, pid %d, command %s, on %s: %s\n",
   2252       1.64  gmcgarry 	    uid, curproc->p_pid, curproc->p_comm, fs->fs_fsmnt, cp);
   2253        1.1   mycroft }
   2254