      1 /*	$NetBSD: ufs_quota.c,v 1.55 2008/01/03 19:28:50 ad Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1990, 1993, 1995
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from software contributed to Berkeley by
      8  * Robert Elz at The University of Melbourne.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. Neither the name of the University nor the names of its contributors
     19  *    may be used to endorse or promote products derived from this software
     20  *    without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     32  * SUCH DAMAGE.
     33  *
     34  *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
     35  */
     36 
     37 #include <sys/cdefs.h>
     38 __KERNEL_RCSID(0, "$NetBSD: ufs_quota.c,v 1.55 2008/01/03 19:28:50 ad Exp $");
     39 
     40 #include <sys/param.h>
     41 #include <sys/kernel.h>
     42 #include <sys/systm.h>
     43 #include <sys/namei.h>
     44 #include <sys/malloc.h>
     45 #include <sys/file.h>
     46 #include <sys/proc.h>
     47 #include <sys/vnode.h>
     48 #include <sys/mount.h>
     49 #include <sys/kauth.h>
     50 
     51 #include <ufs/ufs/quota.h>
     52 #include <ufs/ufs/inode.h>
     53 #include <ufs/ufs/ufsmount.h>
     54 #include <ufs/ufs/ufs_extern.h>
     55 
     56 /*
     57  * The following structure records disk usage for a user or group on a
     58  * filesystem. There is one allocated for each quota that exists on any
     59  * filesystem for the current user or group. A cache is kept of recently
     60  * used entries.
     61  * Field markings and the corresponding locks:
     62  * h:	dqlock
     63  * d:	dq_interlock
     64  *
     65  * Lock order is: dq_interlock -> dqlock
     66  *                dq_interlock -> dqvp
     67  */
     68 struct dquot {
     69 	LIST_ENTRY(dquot) dq_hash;	/* h: hash list */
     70 	u_int16_t dq_flags;		/* d: flags, see below */
     71 	u_int16_t dq_type;		/* d: quota type of this dquot */
     72 	u_int32_t dq_cnt;		/* h: count of active references */
     73 	u_int32_t dq_id;		/* d: identifier this applies to */
     74 	struct	ufsmount *dq_ump;	/* d: filesystem this is taken from */
     75 	kmutex_t dq_interlock;		/* d: lock this dquot */
     76 	struct	dqblk dq_dqb;		/* d: actual usage & quotas */
     77 };
     78 /*
     79  * Flag values.
     80  */
     81 #define	DQ_MOD		0x04		/* this quota modified since read */
     82 #define	DQ_FAKE		0x08		/* no limits here, just usage */
     83 #define	DQ_BLKS		0x10		/* has been warned about blk limit */
     84 #define	DQ_INODS	0x20		/* has been warned about inode limit */
     85 /*
     86  * Shorthand notation.
     87  */
     88 #define	dq_bhardlimit	dq_dqb.dqb_bhardlimit
     89 #define	dq_bsoftlimit	dq_dqb.dqb_bsoftlimit
     90 #define	dq_curblocks	dq_dqb.dqb_curblocks
     91 #define	dq_ihardlimit	dq_dqb.dqb_ihardlimit
     92 #define	dq_isoftlimit	dq_dqb.dqb_isoftlimit
     93 #define	dq_curinodes	dq_dqb.dqb_curinodes
     94 #define	dq_btime	dq_dqb.dqb_btime
     95 #define	dq_itime	dq_dqb.dqb_itime
     96 /*
     97  * If the system has never checked for a quota for this file, then the
     98  * inode's dquot pointers are set to NODQUOT.  Once a write attempt is
     99  * made, the pointers are set to reference dquot structures.
    100  */
    101 #define	NODQUOT		NULL
    102 
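/*
 * A minimal sketch of the locking rules above, for code that already
 * holds a dquot reference "dq" ("change" stands for the caller's block
 * delta, as in chkdq() below): the usage and limit fields (marked d:
 * above) are only modified with dq_interlock held, while dq_cnt and the
 * hash linkage (marked h:) also need dqlock, taken after dq_interlock
 * as dqrele() does.
 *
 *	mutex_enter(&dq->dq_interlock);
 *	dq->dq_curblocks += change;
 *	dq->dq_flags |= DQ_MOD;
 *	mutex_exit(&dq->dq_interlock);
 */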
    103 static int chkdqchg(struct inode *, int64_t, kauth_cred_t, int);
    104 static int chkiqchg(struct inode *, int32_t, kauth_cred_t, int);
    105 #ifdef DIAGNOSTIC
    106 static void dqflush(struct vnode *);
    107 #endif
    108 static int dqget(struct vnode *, u_long, struct ufsmount *, int,
    109 		 struct dquot **);
    110 static void dqref(struct dquot *);
    111 static void dqrele(struct vnode *, struct dquot *);
    112 static int dqsync(struct vnode *, struct dquot *);
    113 
    114 static kmutex_t dqlock;
    115 static kcondvar_t dqcv;
    116 /*
    117  * Quota name to error message mapping.
    118  */
    119 static const char *quotatypes[] = INITQFNAMES;
    120 
    121 /*
    122  * Set up the quotas for an inode.
    123  *
    124  * This routine completely defines the semantics of quotas.
    125  * If other criteria are to be used to establish quotas, the
    126  * MAXQUOTAS value in quota.h should be increased, and the
    127  * additional dquots set up here.
    128  */
    129 int
    130 getinoquota(struct inode *ip)
    131 {
    132 	struct ufsmount *ump = ip->i_ump;
    133 	struct vnode *vp = ITOV(ip);
    134 	int i, error;
    135 	u_int32_t ino_ids[MAXQUOTAS];
    136 
    137 	/*
    138 	 * To avoid deadlocks, never update quotas for quota files
    139 	 * on the same file system.
    140 	 */
    141 	for (i = 0; i < MAXQUOTAS; i++)
    142 		if (ITOV(ip) == ump->um_quotas[i])
    143 			return 0;
    144 
    145 	ino_ids[USRQUOTA] = ip->i_uid;
    146 	ino_ids[GRPQUOTA] = ip->i_gid;
    147 	for (i = 0; i < MAXQUOTAS; i++) {
    148 		/*
    149 		 * If the file id changed, the quota needs updating.
    150 		 */
    151 		if (ip->i_dquot[i] != NODQUOT &&
    152 		    ip->i_dquot[i]->dq_id != ino_ids[i]) {
    153 			dqrele(ITOV(ip), ip->i_dquot[i]);
    154 			ip->i_dquot[i] = NODQUOT;
    155 		}
    156 		/*
    157 		 * Set up the quota based on file id.
    158 		 * EINVAL means that quotas are not enabled.
    159 		 */
    160 		if (ip->i_dquot[i] == NODQUOT &&
    161 		    (error = dqget(vp, ino_ids[i], ump, i, &ip->i_dquot[i])) &&
    162 		    error != EINVAL)
    163 			return (error);
    164 	}
    165 	return 0;
    166 }
    167 
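/*
 * A sketch of what the comment above getinoquota() implies, using a
 * hypothetical project quota (PRJQUOTA and i_projid do not exist in
 * this tree): besides increasing MAXQUOTAS in quota.h, the new id would
 * be filled in next to the existing ones so that the loop in
 * getinoquota() sets up the extra dquot.
 *
 *	ino_ids[USRQUOTA] = ip->i_uid;
 *	ino_ids[GRPQUOTA] = ip->i_gid;
 *	ino_ids[PRJQUOTA] = ip->i_projid;
 */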
    168 /*
    169  * Initialize the quota fields of an inode.
    170  */
    171 void
    172 ufsquota_init(struct inode *ip)
    173 {
    174 	int i;
    175 
    176 	for (i = 0; i < MAXQUOTAS; i++)
    177 		ip->i_dquot[i] = NODQUOT;
    178 }
    179 
    180 /*
    181  * Release the quota fields from an inode.
    182  */
    183 void
    184 ufsquota_free(struct inode *ip)
    185 {
    186 	int i;
    187 
    188 	for (i = 0; i < MAXQUOTAS; i++) {
    189 		dqrele(ITOV(ip), ip->i_dquot[i]);
    190 		ip->i_dquot[i] = NODQUOT;
    191 	}
    192 }
    193 
    194 /*
    195  * Update disk usage, and take corrective action.
    196  */
    197 int
    198 chkdq(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
    199 {
    200 	struct dquot *dq;
    201 	int i;
    202 	int ncurblocks, error;
    203 
    204 	if ((error = getinoquota(ip)) != 0)
    205 		return error;
    206 	if (change == 0)
    207 		return (0);
    208 	if (change < 0) {
    209 		for (i = 0; i < MAXQUOTAS; i++) {
    210 			if ((dq = ip->i_dquot[i]) == NODQUOT)
    211 				continue;
    212 			mutex_enter(&dq->dq_interlock);
    213 			ncurblocks = dq->dq_curblocks + change;
    214 			if (ncurblocks >= 0)
    215 				dq->dq_curblocks = ncurblocks;
    216 			else
    217 				dq->dq_curblocks = 0;
    218 			dq->dq_flags &= ~DQ_BLKS;
    219 			dq->dq_flags |= DQ_MOD;
    220 			mutex_exit(&dq->dq_interlock);
    221 		}
    222 		return (0);
    223 	}
    224 	if ((flags & FORCE) == 0 &&
    225 	    kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL) != 0) {
    226 		for (i = 0; i < MAXQUOTAS; i++) {
    227 			if ((dq = ip->i_dquot[i]) == NODQUOT)
    228 				continue;
    229 			mutex_enter(&dq->dq_interlock);
    230 			error = chkdqchg(ip, change, cred, i);
    231 			mutex_exit(&dq->dq_interlock);
    232 			if (error != 0)
    233 				return (error);
    234 		}
    235 	}
    236 	for (i = 0; i < MAXQUOTAS; i++) {
    237 		if ((dq = ip->i_dquot[i]) == NODQUOT)
    238 			continue;
    239 		mutex_enter(&dq->dq_interlock);
    240 		dq->dq_curblocks += change;
    241 		dq->dq_flags |= DQ_MOD;
    242 		mutex_exit(&dq->dq_interlock);
    243 	}
    244 	return (0);
    245 }
    246 
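/*
 * A minimal sketch of the caller-side pattern for chkdq() (cf. the
 * block allocator in ffs_alloc.c): the requested space is charged
 * before it is allocated, and is backed out with a negative change plus
 * FORCE if the allocation later fails.  "size" and "cred" stand for the
 * caller's request size and credential.
 *
 *	if ((error = chkdq(ip, btodb(size), cred, 0)) != 0)
 *		return error;
 *	...
 *	(void)chkdq(ip, -btodb(size), cred, FORCE);
 */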
    247 /*
    248  * Check for a valid change to a user's block allocation.
    249  * Issue an error message if appropriate.
    250  */
    251 static int
    252 chkdqchg(struct inode *ip, int64_t change, kauth_cred_t cred, int type)
    253 {
    254 	struct dquot *dq = ip->i_dquot[type];
    255 	long ncurblocks = dq->dq_curblocks + change;
    256 
    257 	KASSERT(mutex_owned(&dq->dq_interlock));
    258 	/*
    259 	 * If user would exceed their hard limit, disallow space allocation.
    260 	 */
    261 	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
    262 		if ((dq->dq_flags & DQ_BLKS) == 0 &&
    263 		    ip->i_uid == kauth_cred_geteuid(cred)) {
    264 			uprintf("\n%s: write failed, %s disk limit reached\n",
    265 			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    266 			    quotatypes[type]);
    267 			dq->dq_flags |= DQ_BLKS;
    268 		}
    269 		return (EDQUOT);
    270 	}
    271 	/*
    272 	 * If user is over their soft limit for too long, disallow space
    273 	 * allocation. Reset time limit as they cross their soft limit.
    274 	 */
    275 	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
    276 		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
    277 			dq->dq_btime = time_second + ip->i_ump->um_btime[type];
    278 			if (ip->i_uid == kauth_cred_geteuid(cred))
    279 				uprintf("\n%s: warning, %s %s\n",
    280 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    281 				    quotatypes[type], "disk quota exceeded");
    282 			return (0);
    283 		}
    284 		if (time_second > dq->dq_btime) {
    285 			if ((dq->dq_flags & DQ_BLKS) == 0 &&
    286 			    ip->i_uid == kauth_cred_geteuid(cred)) {
    287 				uprintf("\n%s: write failed, %s %s\n",
    288 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    289 				    quotatypes[type],
    290 				    "disk quota exceeded for too long");
    291 				dq->dq_flags |= DQ_BLKS;
    292 			}
    293 			return (EDQUOT);
    294 		}
    295 	}
    296 	return (0);
    297 }
    298 
    299 /*
    300  * Check the inode limit, applying corrective action.
    301  */
    302 int
    303 chkiq(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
    304 {
    305 	struct dquot *dq;
    306 	int i;
    307 	int ncurinodes, error;
    308 
    309 	if ((error = getinoquota(ip)) != 0)
    310 		return error;
    311 	if (change == 0)
    312 		return (0);
    313 	if (change < 0) {
    314 		for (i = 0; i < MAXQUOTAS; i++) {
    315 			if ((dq = ip->i_dquot[i]) == NODQUOT)
    316 				continue;
    317 			mutex_enter(&dq->dq_interlock);
    318 			ncurinodes = dq->dq_curinodes + change;
    319 			if (ncurinodes >= 0)
    320 				dq->dq_curinodes = ncurinodes;
    321 			else
    322 				dq->dq_curinodes = 0;
    323 			dq->dq_flags &= ~DQ_INODS;
    324 			dq->dq_flags |= DQ_MOD;
    325 			mutex_exit(&dq->dq_interlock);
    326 		}
    327 		return (0);
    328 	}
    329 	if ((flags & FORCE) == 0 && kauth_authorize_generic(cred,
    330 	    KAUTH_GENERIC_ISSUSER, NULL) != 0) {
    331 		for (i = 0; i < MAXQUOTAS; i++) {
    332 			if ((dq = ip->i_dquot[i]) == NODQUOT)
    333 				continue;
    334 			mutex_enter(&dq->dq_interlock);
    335 			error = chkiqchg(ip, change, cred, i);
    336 			mutex_exit(&dq->dq_interlock);
    337 			if (error != 0)
    338 				return (error);
    339 		}
    340 	}
    341 	for (i = 0; i < MAXQUOTAS; i++) {
    342 		if ((dq = ip->i_dquot[i]) == NODQUOT)
    343 			continue;
    344 		mutex_enter(&dq->dq_interlock);
    345 		dq->dq_curinodes += change;
    346 		dq->dq_flags |= DQ_MOD;
    347 		mutex_exit(&dq->dq_interlock);
    348 	}
    349 	return (0);
    350 }
    351 
    352 /*
    353  * Check for a valid change to a user's inode allocation.
    354  * Issue an error message if appropriate.
    355  */
    356 static int
    357 chkiqchg(struct inode *ip, int32_t change, kauth_cred_t cred, int type)
    358 {
    359 	struct dquot *dq = ip->i_dquot[type];
    360 	long ncurinodes = dq->dq_curinodes + change;
    361 
    362 	KASSERT(mutex_owned(&dq->dq_interlock));
    363 	/*
    364 	 * If user would exceed their hard limit, disallow inode allocation.
    365 	 */
    366 	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
    367 		if ((dq->dq_flags & DQ_INODS) == 0 &&
    368 		    ip->i_uid == kauth_cred_geteuid(cred)) {
    369 			uprintf("\n%s: write failed, %s inode limit reached\n",
    370 			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    371 			    quotatypes[type]);
    372 			dq->dq_flags |= DQ_INODS;
    373 		}
    374 		return (EDQUOT);
    375 	}
    376 	/*
    377 	 * If user is over their soft limit for too long, disallow inode
    378 	 * allocation. Reset time limit as they cross their soft limit.
    379 	 */
    380 	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
    381 		if (dq->dq_curinodes < dq->dq_isoftlimit) {
    382 			dq->dq_itime = time_second + ip->i_ump->um_itime[type];
    383 			if (ip->i_uid == kauth_cred_geteuid(cred))
    384 				uprintf("\n%s: warning, %s %s\n",
    385 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    386 				    quotatypes[type], "inode quota exceeded");
    387 			return (0);
    388 		}
    389 		if (time_second > dq->dq_itime) {
    390 			if ((dq->dq_flags & DQ_INODS) == 0 &&
    391 			    ip->i_uid == kauth_cred_geteuid(cred)) {
    392 				uprintf("\n%s: write failed, %s %s\n",
    393 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    394 				    quotatypes[type],
    395 				    "inode quota exceeded for too long");
    396 				dq->dq_flags |= DQ_INODS;
    397 			}
    398 			return (EDQUOT);
    399 		}
    400 	}
    401 	return (0);
    402 }
    403 
    404 /*
    405  * Code to process quotactl commands.
    406  */
    407 
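/*
 * These handlers are reached from the quotactl(2) system call through
 * VFS_QUOTACTL() and ufs_quotactl(), which unpacks the command and
 * quota type encoded with QCMD() from quota.h.  A rough sketch of the
 * userland side (what quotaon(8) does, give or take; the paths are
 * examples only):
 *
 *	quotactl("/home", QCMD(Q_QUOTAON, USRQUOTA), 0, "/home/quota.user");
 */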
    408 /*
    409  * Q_QUOTAON - set up a quota file for a particular file system.
    410  */
    411 int
    412 quotaon(struct lwp *l, struct mount *mp, int type, void *fname)
    413 {
    414 	struct ufsmount *ump = VFSTOUFS(mp);
    415 	struct vnode *vp, **vpp, *mvp;
    416 	struct dquot *dq;
    417 	int error;
    418 	struct nameidata nd;
    419 
    420 	vpp = &ump->um_quotas[type];
    421 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname);
    422 	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0)
    423 		return (error);
    424 	vp = nd.ni_vp;
    425 	VOP_UNLOCK(vp, 0);
    426 	if (vp->v_type != VREG) {
    427 		(void) vn_close(vp, FREAD|FWRITE, l->l_cred, l);
    428 		return (EACCES);
    429 	}
    430 	if (*vpp != vp)
    431 		quotaoff(l, mp, type);
    432 	mutex_enter(&dqlock);
    433 	while ((ump->um_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
    434 		cv_wait(&dqcv, &dqlock);
    435 	ump->um_qflags[type] |= QTF_OPENING;
    436 	mutex_exit(&dqlock);
    437 	mp->mnt_flag |= MNT_QUOTA;
    438 	vp->v_vflag |= VV_SYSTEM;	/* XXXSMP */
    439 	*vpp = vp;
    440 	/*
    441 	 * Save the credential of the process that turned on quotas.
    442 	 * Set up the time limits for this quota.
    443 	 */
    444 	kauth_cred_hold(l->l_cred);
    445 	ump->um_cred[type] = l->l_cred;
    446 	ump->um_btime[type] = MAX_DQ_TIME;
    447 	ump->um_itime[type] = MAX_IQ_TIME;
    448 	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
    449 		if (dq->dq_btime > 0)
    450 			ump->um_btime[type] = dq->dq_btime;
    451 		if (dq->dq_itime > 0)
    452 			ump->um_itime[type] = dq->dq_itime;
    453 		dqrele(NULLVP, dq);
    454 	}
    455 	/* Allocate a marker vnode. */
    456 	if ((mvp = vnalloc(mp)) == NULL) {
    457 		error = ENOMEM;
    458 		goto out;
    459 	}
    460 	/*
    461 	 * Search vnodes associated with this mount point,
    462 	 * adding references to the quota file being opened.
    463 	 * NB: only need to add dquots for inodes being modified.
    464 	 */
    465 	mutex_enter(&mntvnode_lock);
    466 again:
    467 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
    468 		vmark(mvp, vp);
    469 		mutex_enter(&vp->v_interlock);
    470 		if (vp->v_mount != mp || vismarker(vp) ||
    471 		    vp->v_type == VNON || vp->v_writecount == 0) {
    472 			mutex_exit(&vp->v_interlock);
    473 			continue;
    474 		}
    475 		mutex_exit(&mntvnode_lock);
    476 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
    477 			mutex_enter(&mntvnode_lock);
    478 			(void)vunmark(mvp);
    479 			goto again;
    480 		}
    481 		if ((error = getinoquota(VTOI(vp))) != 0) {
    482 			vput(vp);
    483 			mutex_enter(&mntvnode_lock);
    484 			(void)vunmark(mvp);
    485 			break;
    486 		}
    487 		vput(vp);
    488 	}
    489 	mutex_exit(&mntvnode_lock);
    490 	vnfree(mvp);
    491  out:
    492 	mutex_enter(&dqlock);
    493 	ump->um_qflags[type] &= ~QTF_OPENING;
    494 	cv_broadcast(&dqcv);
    495 	mutex_exit(&dqlock);
    496 	if (error)
    497 		quotaoff(l, mp, type);
    498 	return (error);
    499 }
    500 
    501 /*
    502  * Q_QUOTAOFF - turn off disk quotas for a filesystem.
    503  */
    504 int
    505 quotaoff(struct lwp *l, struct mount *mp, int type)
    506 {
    507 	struct vnode *vp;
    508 	struct vnode *qvp, *mvp;
    509 	struct ufsmount *ump = VFSTOUFS(mp);
    510 	struct dquot *dq;
    511 	struct inode *ip;
    512 	kauth_cred_t cred;
    513 	int i, error;
    514 
    515 	/* Allocate a marker vnode. */
    516 	if ((mvp = vnalloc(mp)) == NULL)
    517 		return ENOMEM;
    518 
    519 	mutex_enter(&dqlock);
    520 	while ((ump->um_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
    521 		cv_wait(&dqcv, &dqlock);
    522 	if ((qvp = ump->um_quotas[type]) == NULLVP) {
    523 		mutex_exit(&dqlock);
    524 		vnfree(mvp);
    525 		return (0);
    526 	}
    527 	ump->um_qflags[type] |= QTF_CLOSING;
    528 	mutex_exit(&dqlock);
    529 	/*
    530 	 * Search vnodes associated with this mount point,
    531 	 * deleting any references to the quota file being closed.
    532 	 */
    533 	mutex_enter(&mntvnode_lock);
    534 again:
    535 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
    536 		vmark(mvp, vp);
    537 		mutex_enter(&vp->v_interlock);
    538 		if (vp->v_mount != mp || vismarker(vp) || vp->v_type == VNON) {
    539 			mutex_exit(&vp->v_interlock);
    540 			continue;
    541 		}
    542 		mutex_exit(&mntvnode_lock);
    543 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
    544 			mutex_enter(&mntvnode_lock);
    545 			(void)vunmark(mvp);
    546 			goto again;
    547 		}
    548 		ip = VTOI(vp);
    549 		dq = ip->i_dquot[type];
    550 		ip->i_dquot[type] = NODQUOT;
    551 		dqrele(vp, dq);
    552 		vput(vp);
    553 		mutex_enter(&mntvnode_lock);
    554 	}
    555 	mutex_exit(&mntvnode_lock);
    556 #ifdef DIAGNOSTIC
    557 	dqflush(qvp);
    558 #endif
    559 	qvp->v_vflag &= ~VV_SYSTEM;
    560 	error = vn_close(qvp, FREAD|FWRITE, l->l_cred, l);
    561 	mutex_enter(&dqlock);
    562 	ump->um_quotas[type] = NULLVP;
    563 	cred = ump->um_cred[type];
    564 	ump->um_cred[type] = NOCRED;
    565 	for (i = 0; i < MAXQUOTAS; i++)
    566 		if (ump->um_quotas[i] != NULLVP)
    567 			break;
    568 	ump->um_qflags[type] &= ~QTF_CLOSING;
    569 	cv_broadcast(&dqcv);
    570 	mutex_exit(&dqlock);
    571 	kauth_cred_free(cred);
    572 	if (i == MAXQUOTAS)
    573 		mp->mnt_flag &= ~MNT_QUOTA;
    574 	return (error);
    575 }
    576 
    577 /*
    578  * Q_GETQUOTA - return current values in a dqblk structure.
    579  */
    580 int
    581 getquota(struct mount *mp, u_long id, int type, void *addr)
    582 {
    583 	struct dquot *dq;
    584 	int error;
    585 
    586 	if ((error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq)) != 0)
    587 		return (error);
    588 	error = copyout((void *)&dq->dq_dqb, addr, sizeof (struct dqblk));
    589 	dqrele(NULLVP, dq);
    590 	return (error);
    591 }
    592 
    593 /*
    594  * Q_SETQUOTA - assign an entire dqblk structure.
    595  */
    596 int
    597 setquota(struct mount *mp, u_long id, int type, void *addr)
    598 {
    599 	struct dquot *dq;
    600 	struct dquot *ndq;
    601 	struct ufsmount *ump = VFSTOUFS(mp);
    602 	struct dqblk newlim;
    603 	int error;
    604 
    605 	error = copyin(addr, (void *)&newlim, sizeof (struct dqblk));
    606 	if (error)
    607 		return (error);
    608 	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
    609 		return (error);
    610 	dq = ndq;
    611 	mutex_enter(&dq->dq_interlock);
    612 	/*
    613 	 * Copy all but the current values.
    614 	 * Reset time limit if previously had no soft limit or were
    615 	 * under it, but now have a soft limit and are over it.
    616 	 */
    617 	newlim.dqb_curblocks = dq->dq_curblocks;
    618 	newlim.dqb_curinodes = dq->dq_curinodes;
    619 	if (dq->dq_id != 0) {
    620 		newlim.dqb_btime = dq->dq_btime;
    621 		newlim.dqb_itime = dq->dq_itime;
    622 	}
    623 	if (newlim.dqb_bsoftlimit &&
    624 	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
    625 	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
    626 		newlim.dqb_btime = time_second + ump->um_btime[type];
    627 	if (newlim.dqb_isoftlimit &&
    628 	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
    629 	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
    630 		newlim.dqb_itime = time_second + ump->um_itime[type];
    631 	dq->dq_dqb = newlim;
    632 	if (dq->dq_curblocks < dq->dq_bsoftlimit)
    633 		dq->dq_flags &= ~DQ_BLKS;
    634 	if (dq->dq_curinodes < dq->dq_isoftlimit)
    635 		dq->dq_flags &= ~DQ_INODS;
    636 	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
    637 	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
    638 		dq->dq_flags |= DQ_FAKE;
    639 	else
    640 		dq->dq_flags &= ~DQ_FAKE;
    641 	dq->dq_flags |= DQ_MOD;
    642 	mutex_exit(&dq->dq_interlock);
    643 	dqrele(NULLVP, dq);
    644 	return (0);
    645 }
    646 
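/*
 * A rough sketch of the userland view of Q_GETQUOTA/Q_SETQUOTA (along
 * the lines of edquot(8); "uid" and "newsoft" are the caller's): "addr"
 * in the handlers above and below is a user-space struct dqblk.  Since
 * setquota() preserves the current usage fields, only the limits passed
 * in matter.
 *
 *	struct dqblk dqb;
 *
 *	if (quotactl("/home", QCMD(Q_GETQUOTA, USRQUOTA), uid, &dqb) == 0) {
 *		dqb.dqb_bsoftlimit = newsoft;
 *		(void)quotactl("/home", QCMD(Q_SETQUOTA, USRQUOTA), uid, &dqb);
 *	}
 */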
    647 /*
    648  * Q_SETUSE - set current inode and block usage.
    649  */
    650 int
    651 setuse(struct mount *mp, u_long id, int type, void *addr)
    652 {
    653 	struct dquot *dq;
    654 	struct ufsmount *ump = VFSTOUFS(mp);
    655 	struct dquot *ndq;
    656 	struct dqblk usage;
    657 	int error;
    658 
    659 	error = copyin(addr, (void *)&usage, sizeof (struct dqblk));
    660 	if (error)
    661 		return (error);
    662 	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
    663 		return (error);
    664 	dq = ndq;
    665 	mutex_enter(&dq->dq_interlock);
    666 	/*
    667 	 * Reset time limit if have a soft limit and were
    668 	 * previously under it, but are now over it.
    669 	 */
    670 	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
    671 	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
    672 		dq->dq_btime = time_second + ump->um_btime[type];
    673 	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
    674 	    usage.dqb_curinodes >= dq->dq_isoftlimit)
    675 		dq->dq_itime = time_second + ump->um_itime[type];
    676 	dq->dq_curblocks = usage.dqb_curblocks;
    677 	dq->dq_curinodes = usage.dqb_curinodes;
    678 	if (dq->dq_curblocks < dq->dq_bsoftlimit)
    679 		dq->dq_flags &= ~DQ_BLKS;
    680 	if (dq->dq_curinodes < dq->dq_isoftlimit)
    681 		dq->dq_flags &= ~DQ_INODS;
    682 	dq->dq_flags |= DQ_MOD;
    683 	mutex_exit(&dq->dq_interlock);
    684 	dqrele(NULLVP, dq);
    685 	return (0);
    686 }
    687 
    688 /*
    689  * Q_SYNC - sync quota files to disk.
    690  */
    691 int
    692 qsync(struct mount *mp)
    693 {
    694 	struct ufsmount *ump = VFSTOUFS(mp);
    695 	struct vnode *vp, *mvp;
    696 	struct dquot *dq;
    697 	int i, error;
    698 
    699 	/*
    700 	 * Check if the mount point has any quotas.
    701 	 * If not, simply return.
    702 	 */
    703 	for (i = 0; i < MAXQUOTAS; i++)
    704 		if (ump->um_quotas[i] != NULLVP)
    705 			break;
    706 	if (i == MAXQUOTAS)
    707 		return (0);
    708 
    709 	/* Allocate a marker vnode. */
    710 	if ((mvp = vnalloc(mp)) == NULL)
    711 		return (ENOMEM);
    712 
    713 	/*
    714 	 * Search vnodes associated with this mount point,
    715 	 * synchronizing any modified dquot structures.
    716 	 */
    717 	mutex_enter(&mntvnode_lock);
    718  again:
    719 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
    720 		vmark(mvp, vp);
    721 		mutex_enter(&vp->v_interlock);
    722 		if (vp->v_mount != mp || vismarker(vp) || vp->v_type == VNON) {
    723 			mutex_exit(&vp->v_interlock);
    724 			continue;
    725 		}
    726 		mutex_exit(&mntvnode_lock);
    727 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
    728 		if (error) {
    729 			mutex_enter(&mntvnode_lock);
    730 			if (error == ENOENT) {
    731 				(void)vunmark(mvp);
    732 				goto again;
    733 			}
    734 			continue;
    735 		}
    736 		for (i = 0; i < MAXQUOTAS; i++) {
    737 			dq = VTOI(vp)->i_dquot[i];
    738 			if (dq == NODQUOT)
    739 				continue;
    740 			mutex_enter(&dq->dq_interlock);
    741 			if (dq->dq_flags & DQ_MOD)
    742 				dqsync(vp, dq);
    743 			mutex_exit(&dq->dq_interlock);
    744 		}
    745 		vput(vp);
    746 		mutex_enter(&mntvnode_lock);
    747 	}
    748 	mutex_exit(&mntvnode_lock);
    749 	vnfree(mvp);
    750 	return (0);
    751 }
    752 
    753 /*
    754  * Code pertaining to management of the in-core dquot data structures.
    755  */
    756 #define DQHASH(dqvp, id) \
    757 	(((((long)(dqvp)) >> 8) + id) & dqhash)
    758 static LIST_HEAD(dqhashhead, dquot) *dqhashtbl;
    759 static u_long dqhash;
    760 static pool_cache_t dquot_cache;
    761 
    762 MALLOC_JUSTDEFINE(M_DQUOT, "UFS quota", "UFS quota entries");
    763 
    764 /*
    765  * Initialize the quota system.
    766  */
    767 void
    768 dqinit(void)
    769 {
    770 
    771 	mutex_init(&dqlock, MUTEX_DEFAULT, IPL_NONE);
    772 	cv_init(&dqcv, "quota");
    773 	malloc_type_attach(M_DQUOT);
    774 	dqhashtbl =
    775 	    hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &dqhash);
    776 	dquot_cache = pool_cache_init(sizeof(struct dquot), 0, 0, 0, "ufsdq",
    777 	    NULL, IPL_NONE, NULL, NULL, NULL);
    778 }
    779 
    780 void
    781 dqreinit(void)
    782 {
    783 	struct dquot *dq;
    784 	struct dqhashhead *oldhash, *hash;
    785 	struct vnode *dqvp;
    786 	u_long oldmask, mask, hashval;
    787 	int i;
    788 
    789 	hash = hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &mask);
    790 	mutex_enter(&dqlock);
    791 	oldhash = dqhashtbl;
    792 	oldmask = dqhash;
    793 	dqhashtbl = hash;
    794 	dqhash = mask;
    795 	for (i = 0; i <= oldmask; i++) {
    796 		while ((dq = LIST_FIRST(&oldhash[i])) != NULL) {
    797 			dqvp = dq->dq_ump->um_quotas[dq->dq_type];
    798 			LIST_REMOVE(dq, dq_hash);
    799 			hashval = DQHASH(dqvp, dq->dq_id);
    800 			LIST_INSERT_HEAD(&dqhashtbl[hashval], dq, dq_hash);
    801 		}
    802 	}
    803 	mutex_exit(&dqlock);
    804 	hashdone(oldhash, M_DQUOT);
    805 }
    806 
    807 /*
    808  * Free resources held by quota system.
    809  */
    810 void
    811 dqdone(void)
    812 {
    813 
    814 	pool_cache_destroy(dquot_cache);
    815 	hashdone(dqhashtbl, M_DQUOT);
    816 	malloc_type_detach(M_DQUOT);
    817 	cv_destroy(&dqcv);
    818 	mutex_destroy(&dqlock);
    819 }
    820 
    821 /*
    822  * Obtain a dquot structure for the specified identifier and quota file,
    823  * reading the information from the file if necessary.
    824  */
    825 static int
    826 dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    827     struct dquot **dqp)
    828 {
    829 	struct dquot *dq, *ndq;
    830 	struct dqhashhead *dqh;
    831 	struct vnode *dqvp;
    832 	struct iovec aiov;
    833 	struct uio auio;
    834 	int error;
    835 
    836 	/* Lock to see an up-to-date value for QTF_CLOSING. */
    837 	mutex_enter(&dqlock);
    838 	dqvp = ump->um_quotas[type];
    839 	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
    840 		mutex_exit(&dqlock);
    841 		*dqp = NODQUOT;
    842 		return (EINVAL);
    843 	}
    844 	KASSERT(dqvp != vp);
    845 	/*
    846 	 * Check the cache first.
    847 	 */
    848 	dqh = &dqhashtbl[DQHASH(dqvp, id)];
    849 	LIST_FOREACH(dq, dqh, dq_hash) {
    850 		if (dq->dq_id != id ||
    851 		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
    852 			continue;
    853 		KASSERT(dq->dq_cnt > 0);
    854 		dqref(dq);
    855 		mutex_exit(&dqlock);
    856 		*dqp = dq;
    857 		return (0);
    858 	}
    859 	/*
    860 	 * Not in cache, allocate a new one.
    861 	 */
    862 	mutex_exit(&dqlock);
    863 	ndq = pool_cache_get(dquot_cache, PR_WAITOK);
    864 	/*
    865 	 * Initialize the contents of the dquot structure.
    866 	 */
    867 	memset((char *)ndq, 0, sizeof *ndq);
    868 	ndq->dq_flags = 0;
    869 	ndq->dq_id = id;
    870 	ndq->dq_ump = ump;
    871 	ndq->dq_type = type;
    872 	mutex_init(&ndq->dq_interlock, MUTEX_DEFAULT, IPL_NONE);
    873 	mutex_enter(&dqlock);
    874 	dqh = &dqhashtbl[DQHASH(dqvp, id)];
    875 	LIST_FOREACH(dq, dqh, dq_hash) {
    876 		if (dq->dq_id != id ||
    877 		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
    878 			continue;
    879 		/*
    880 		 * Another thread beat us allocating this dquot.
    881 		 */
    882 		KASSERT(dq->dq_cnt > 0);
    883 		dqref(dq);
    884 		mutex_exit(&dqlock);
    885 		pool_cache_put(dquot_cache, ndq);
    886 		*dqp = dq;
    887 		return 0;
    888 	}
    889 	dq = ndq;
    890 	LIST_INSERT_HEAD(dqh, dq, dq_hash);
    891 	dqref(dq);
    892 	mutex_enter(&dq->dq_interlock);
    893 	mutex_exit(&dqlock);
    894 	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
    895 	auio.uio_iov = &aiov;
    896 	auio.uio_iovcnt = 1;
    897 	aiov.iov_base = (void *)&dq->dq_dqb;
    898 	aiov.iov_len = sizeof (struct dqblk);
    899 	auio.uio_resid = sizeof (struct dqblk);
    900 	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
    901 	auio.uio_rw = UIO_READ;
    902 	UIO_SETUP_SYSSPACE(&auio);
    903 	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
    904 	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
    905 		memset((void *)&dq->dq_dqb, 0, sizeof(struct dqblk));
    906 	VOP_UNLOCK(dqvp, 0);
    907 	/*
    908 	 * I/O error in reading quota file, release
    909 	 * quota structure and reflect problem to caller.
    910 	 */
    911 	if (error) {
    912 		mutex_enter(&dqlock);
    913 		LIST_REMOVE(dq, dq_hash);
    914 		mutex_exit(&dqlock);
    915 		mutex_exit(&dq->dq_interlock);
    916 		dqrele(vp, dq);
    917 		*dqp = NODQUOT;
    918 		return (error);
    919 	}
    920 	/*
    921 	 * Check for no limit to enforce.
    922 	 * Initialize time values if necessary.
    923 	 */
    924 	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
    925 	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
    926 		dq->dq_flags |= DQ_FAKE;
    927 	if (dq->dq_id != 0) {
    928 		if (dq->dq_btime == 0)
    929 			dq->dq_btime = time_second + ump->um_btime[type];
    930 		if (dq->dq_itime == 0)
    931 			dq->dq_itime = time_second + ump->um_itime[type];
    932 	}
    933 	mutex_exit(&dq->dq_interlock);
    934 	*dqp = dq;
    935 	return (0);
    936 }
    937 
    938 /*
    939  * Obtain a reference to a dquot.
    940  */
    941 static void
    942 dqref(struct dquot *dq)
    943 {
    944 
    945 	KASSERT(mutex_owned(&dqlock));
    946 	dq->dq_cnt++;
    947 	KASSERT(dq->dq_cnt > 0);
    948 }
    949 
    950 /*
    951  * Release a reference to a dquot.
    952  */
    953 static void
    954 dqrele(struct vnode *vp, struct dquot *dq)
    955 {
    956 
    957 	if (dq == NODQUOT)
    958 		return;
    959 	mutex_enter(&dq->dq_interlock);
    960 	for (;;) {
    961 		mutex_enter(&dqlock);
    962 		if (dq->dq_cnt > 1) {
    963 			dq->dq_cnt--;
    964 			mutex_exit(&dqlock);
    965 			mutex_exit(&dq->dq_interlock);
    966 			return;
    967 		}
    968 		if ((dq->dq_flags & DQ_MOD) == 0)
    969 			break;
    970 		mutex_exit(&dqlock);
    971 		(void) dqsync(vp, dq);
    972 	}
    973 	KASSERT(dq->dq_cnt == 1 && (dq->dq_flags & DQ_MOD) == 0);
    974 	LIST_REMOVE(dq, dq_hash);
    975 	mutex_exit(&dqlock);
    976 	mutex_exit(&dq->dq_interlock);
    977 	mutex_destroy(&dq->dq_interlock);
    978 	pool_cache_put(dquot_cache, dq);
    979 }
    980 
    981 /*
    982  * Update the disk quota in the quota file.
    983  */
    984 static int
    985 dqsync(struct vnode *vp, struct dquot *dq)
    986 {
    987 	struct vnode *dqvp;
    988 	struct iovec aiov;
    989 	struct uio auio;
    990 	int error;
    991 
    992 	if (dq == NODQUOT)
    993 		panic("dqsync: dquot");
    994 	KASSERT(mutex_owned(&dq->dq_interlock));
    995 	if ((dq->dq_flags & DQ_MOD) == 0)
    996 		return (0);
    997 	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
    998 		panic("dqsync: file");
    999 	KASSERT(dqvp != vp);
   1000 	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
   1001 	auio.uio_iov = &aiov;
   1002 	auio.uio_iovcnt = 1;
   1003 	aiov.iov_base = (void *)&dq->dq_dqb;
   1004 	aiov.iov_len = sizeof (struct dqblk);
   1005 	auio.uio_resid = sizeof (struct dqblk);
   1006 	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
   1007 	auio.uio_rw = UIO_WRITE;
   1008 	UIO_SETUP_SYSSPACE(&auio);
   1009 	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
   1010 	if (auio.uio_resid && error == 0)
   1011 		error = EIO;
   1012 	dq->dq_flags &= ~DQ_MOD;
   1013 	VOP_UNLOCK(dqvp, 0);
   1014 	return (error);
   1015 }
   1016 
   1017 #ifdef DIAGNOSTIC
   1018 /*
   1019  * Check the hash chains for stray dquots.
   1020  */
   1021 static void
   1022 dqflush(struct vnode *vp)
   1023 {
   1024 	struct dquot *dq;
   1025 	int i;
   1026 
   1027 	mutex_enter(&dqlock);
   1028 	for (i = 0; i <= dqhash; i++)
   1029 		LIST_FOREACH(dq, &dqhashtbl[i], dq_hash)
   1030 			KASSERT(dq->dq_ump->um_quotas[dq->dq_type] != vp);
   1031 	mutex_exit(&dqlock);
   1032 }
   1033 #endif
   1034