      1 /*	$NetBSD: ufs_quota.c,v 1.57 2008/01/24 22:55:21 ad Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1990, 1993, 1995
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from software contributed to Berkeley by
      8  * Robert Elz at The University of Melbourne.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. Neither the name of the University nor the names of its contributors
     19  *    may be used to endorse or promote products derived from this software
     20  *    without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     32  * SUCH DAMAGE.
     33  *
     34  *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
     35  */
     36 
     37 #include <sys/cdefs.h>
     38 __KERNEL_RCSID(0, "$NetBSD: ufs_quota.c,v 1.57 2008/01/24 22:55:21 ad Exp $");
     39 
     40 #include <sys/param.h>
     41 #include <sys/kernel.h>
     42 #include <sys/systm.h>
     43 #include <sys/namei.h>
     44 #include <sys/malloc.h>
     45 #include <sys/file.h>
     46 #include <sys/proc.h>
     47 #include <sys/vnode.h>
     48 #include <sys/mount.h>
     49 #include <sys/kauth.h>
     50 
     51 #include <ufs/ufs/quota.h>
     52 #include <ufs/ufs/inode.h>
     53 #include <ufs/ufs/ufsmount.h>
     54 #include <ufs/ufs/ufs_extern.h>
     55 
     56 /*
     57  * The following structure records disk usage for a user or group on a
     58  * filesystem. There is one allocated for each quota that exists on any
     59  * filesystem for the current user or group. A cache is kept of recently
     60  * used entries.
     61  * Field markings and the corresponding locks:
     62  * h:	dqlock
     63  * d:	dq_interlock
     64  *
     65  * Lock order is: dq_interlock -> dqlock
     66  *                dq_interlock -> dqvp
     67  */
     68 struct dquot {
     69 	LIST_ENTRY(dquot) dq_hash;	/* h: hash list */
     70 	u_int16_t dq_flags;		/* d: flags, see below */
     71 	u_int16_t dq_type;		/* d: quota type of this dquot */
     72 	u_int32_t dq_cnt;		/* h: count of active references */
     73 	u_int32_t dq_id;		/* d: identifier this applies to */
     74 	struct	ufsmount *dq_ump;	/* d: filesystem this is taken from */
     75 	kmutex_t dq_interlock;		/* d: lock this dquot */
     76 	struct	dqblk dq_dqb;		/* d: actual usage & quotas */
     77 };
     78 /*
     79  * Flag values.
     80  */
     81 #define	DQ_MOD		0x04		/* this quota modified since read */
     82 #define	DQ_FAKE		0x08		/* no limits here, just usage */
     83 #define	DQ_BLKS		0x10		/* has been warned about blk limit */
     84 #define	DQ_INODS	0x20		/* has been warned about inode limit */
     85 /*
     86  * Shorthand notation.
     87  */
     88 #define	dq_bhardlimit	dq_dqb.dqb_bhardlimit
     89 #define	dq_bsoftlimit	dq_dqb.dqb_bsoftlimit
     90 #define	dq_curblocks	dq_dqb.dqb_curblocks
     91 #define	dq_ihardlimit	dq_dqb.dqb_ihardlimit
     92 #define	dq_isoftlimit	dq_dqb.dqb_isoftlimit
     93 #define	dq_curinodes	dq_dqb.dqb_curinodes
     94 #define	dq_btime	dq_dqb.dqb_btime
     95 #define	dq_itime	dq_dqb.dqb_itime
     96 /*
      97  * If the system has never checked for a quota for this file, the inode's
      98  * dquot pointer is set to NODQUOT.  Once a write attempt is made, the
      99  * pointer is set to reference a dquot structure.
    100  */
    101 #define	NODQUOT		NULL
    102 
    103 static int chkdqchg(struct inode *, int64_t, kauth_cred_t, int);
    104 static int chkiqchg(struct inode *, int32_t, kauth_cred_t, int);
    105 #ifdef DIAGNOSTIC
    106 static void dqflush(struct vnode *);
    107 #endif
    108 static int dqget(struct vnode *, u_long, struct ufsmount *, int,
    109 		 struct dquot **);
    110 static void dqref(struct dquot *);
    111 static void dqrele(struct vnode *, struct dquot *);
    112 static int dqsync(struct vnode *, struct dquot *);
    113 
    114 static kmutex_t dqlock;
    115 static kcondvar_t dqcv;
    116 /*
    117  * Quota name to error message mapping.
    118  */
    119 static const char *quotatypes[] = INITQFNAMES;
    120 
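/*
 * Illustrative sketch (not part of the original file): honoring the lock
 * order documented above for struct dquot.  The per-dquot dq_interlock is
 * taken before the global dqlock, just as dqrele() below does; "dq" is
 * assumed to be a dquot the caller already holds a reference on.
 */
#if 0
static void
dquot_lock_order_example(struct dquot *dq)
{

	mutex_enter(&dq->dq_interlock);		/* d: fields of this dquot */
	mutex_enter(&dqlock);			/* h: hash chains, dq_cnt */
	/* ... inspect dq->dq_cnt or the hash chain here ... */
	mutex_exit(&dqlock);
	mutex_exit(&dq->dq_interlock);
}
#endif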
    121 /*
    122  * Set up the quotas for an inode.
    123  *
    124  * This routine completely defines the semantics of quotas.
     125  * If other criteria are to be used to establish quotas, the
     126  * MAXQUOTAS value in quota.h should be increased, and the
    127  * additional dquots set up here.
    128  */
    129 int
    130 getinoquota(struct inode *ip)
    131 {
    132 	struct ufsmount *ump = ip->i_ump;
    133 	struct vnode *vp = ITOV(ip);
    134 	int i, error;
    135 	u_int32_t ino_ids[MAXQUOTAS];
    136 
    137 	/*
     138 	 * To avoid deadlocks, never update quotas for quota files
     139 	 * on the same file system.
    140 	 */
    141 	for (i = 0; i < MAXQUOTAS; i++)
    142 		if (ITOV(ip) == ump->um_quotas[i])
    143 			return 0;
    144 
    145 	ino_ids[USRQUOTA] = ip->i_uid;
    146 	ino_ids[GRPQUOTA] = ip->i_gid;
    147 	for (i = 0; i < MAXQUOTAS; i++) {
    148 		/*
     149 		 * If the file id changed, the quota needs updating.
    150 		 */
    151 		if (ip->i_dquot[i] != NODQUOT &&
    152 		    ip->i_dquot[i]->dq_id != ino_ids[i]) {
    153 			dqrele(ITOV(ip), ip->i_dquot[i]);
    154 			ip->i_dquot[i] = NODQUOT;
    155 		}
    156 		/*
    157 		 * Set up the quota based on file id.
    158 		 * EINVAL means that quotas are not enabled.
    159 		 */
    160 		if (ip->i_dquot[i] == NODQUOT &&
    161 		    (error = dqget(vp, ino_ids[i], ump, i, &ip->i_dquot[i])) &&
    162 		    error != EINVAL)
    163 			return (error);
    164 	}
    165 	return 0;
    166 }
    167 
    168 /*
    169  * Initialize the quota fields of an inode.
    170  */
    171 void
    172 ufsquota_init(struct inode *ip)
    173 {
    174 	int i;
    175 
    176 	for (i = 0; i < MAXQUOTAS; i++)
    177 		ip->i_dquot[i] = NODQUOT;
    178 }
    179 
    180 /*
    181  * Release the quota fields from an inode.
    182  */
    183 void
    184 ufsquota_free(struct inode *ip)
    185 {
    186 	int i;
    187 
    188 	for (i = 0; i < MAXQUOTAS; i++) {
    189 		dqrele(ITOV(ip), ip->i_dquot[i]);
    190 		ip->i_dquot[i] = NODQUOT;
    191 	}
    192 }
    193 
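/*
 * Minimal sketch (assumed caller, not taken from this file) of the
 * per-inode quota lifecycle: ufsquota_init() at inode set-up,
 * getinoquota() to attach dquots for i_uid/i_gid, chkdq() to charge
 * usage, and ufsquota_free() at reclaim.  The real calls are made from
 * the UFS vnode attach, write and reclaim paths.
 */
#if 0
static void
inode_quota_lifecycle_example(struct inode *ip, kauth_cred_t cred)
{

	ufsquota_init(ip);			/* all slots start at NODQUOT */
	(void)getinoquota(ip);			/* look up user and group dquots */
	(void)chkdq(ip, btodb(4096), cred, 0);	/* charge one 4 KB allocation */
	ufsquota_free(ip);			/* drop the dquot references */
}
#endif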
    194 /*
    195  * Update disk usage, and take corrective action.
    196  */
    197 int
    198 chkdq(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
    199 {
    200 	struct dquot *dq;
    201 	int i;
    202 	int ncurblocks, error;
    203 
    204 	if ((error = getinoquota(ip)) != 0)
    205 		return error;
    206 	if (change == 0)
    207 		return (0);
    208 	if (change < 0) {
    209 		for (i = 0; i < MAXQUOTAS; i++) {
    210 			if ((dq = ip->i_dquot[i]) == NODQUOT)
    211 				continue;
    212 			mutex_enter(&dq->dq_interlock);
    213 			ncurblocks = dq->dq_curblocks + change;
    214 			if (ncurblocks >= 0)
    215 				dq->dq_curblocks = ncurblocks;
    216 			else
    217 				dq->dq_curblocks = 0;
    218 			dq->dq_flags &= ~DQ_BLKS;
    219 			dq->dq_flags |= DQ_MOD;
    220 			mutex_exit(&dq->dq_interlock);
    221 		}
    222 		return (0);
    223 	}
    224 	if ((flags & FORCE) == 0 &&
    225 	    kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL) != 0) {
    226 		for (i = 0; i < MAXQUOTAS; i++) {
    227 			if ((dq = ip->i_dquot[i]) == NODQUOT)
    228 				continue;
    229 			mutex_enter(&dq->dq_interlock);
    230 			error = chkdqchg(ip, change, cred, i);
    231 			mutex_exit(&dq->dq_interlock);
    232 			if (error != 0)
    233 				return (error);
    234 		}
    235 	}
    236 	for (i = 0; i < MAXQUOTAS; i++) {
    237 		if ((dq = ip->i_dquot[i]) == NODQUOT)
    238 			continue;
    239 		mutex_enter(&dq->dq_interlock);
    240 		dq->dq_curblocks += change;
    241 		dq->dq_flags |= DQ_MOD;
    242 		mutex_exit(&dq->dq_interlock);
    243 	}
    244 	return (0);
    245 }
    246 
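/*
 * Sketch of a typical chkdq() caller (assumed, not from this file): a
 * block allocation path charges the quota first and credits it back with
 * FORCE if the allocation itself fails, much as the FFS allocation code
 * does.  "size" is in bytes; quota usage is counted in DEV_BSIZE units,
 * hence btodb().
 */
#if 0
static int
alloc_with_quota_example(struct inode *ip, long size, kauth_cred_t cred)
{
	int error;

	if ((error = chkdq(ip, btodb(size), cred, 0)) != 0)
		return error;			/* EDQUOT: over quota */
	error = 0;	/* ... perform the real block allocation here ... */
	if (error != 0)
		(void)chkdq(ip, -btodb(size), cred, FORCE);	/* undo charge */
	return error;
}
#endif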
    247 /*
     248  * Check for a valid change to a user's allocation.
    249  * Issue an error message if appropriate.
    250  */
    251 static int
    252 chkdqchg(struct inode *ip, int64_t change, kauth_cred_t cred, int type)
    253 {
    254 	struct dquot *dq = ip->i_dquot[type];
    255 	long ncurblocks = dq->dq_curblocks + change;
    256 
    257 	KASSERT(mutex_owned(&dq->dq_interlock));
    258 	/*
    259 	 * If user would exceed their hard limit, disallow space allocation.
    260 	 */
    261 	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
    262 		if ((dq->dq_flags & DQ_BLKS) == 0 &&
    263 		    ip->i_uid == kauth_cred_geteuid(cred)) {
    264 			uprintf("\n%s: write failed, %s disk limit reached\n",
    265 			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    266 			    quotatypes[type]);
    267 			dq->dq_flags |= DQ_BLKS;
    268 		}
    269 		return (EDQUOT);
    270 	}
    271 	/*
    272 	 * If user is over their soft limit for too long, disallow space
    273 	 * allocation. Reset time limit as they cross their soft limit.
    274 	 */
    275 	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
    276 		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
    277 			dq->dq_btime = time_second + ip->i_ump->um_btime[type];
    278 			if (ip->i_uid == kauth_cred_geteuid(cred))
    279 				uprintf("\n%s: warning, %s %s\n",
    280 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    281 				    quotatypes[type], "disk quota exceeded");
    282 			return (0);
    283 		}
    284 		if (time_second > dq->dq_btime) {
    285 			if ((dq->dq_flags & DQ_BLKS) == 0 &&
    286 			    ip->i_uid == kauth_cred_geteuid(cred)) {
    287 				uprintf("\n%s: write failed, %s %s\n",
    288 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    289 				    quotatypes[type],
    290 				    "disk quota exceeded for too long");
    291 				dq->dq_flags |= DQ_BLKS;
    292 			}
    293 			return (EDQUOT);
    294 		}
    295 	}
    296 	return (0);
    297 }
    298 
    299 /*
    300  * Check the inode limit, applying corrective action.
    301  */
    302 int
    303 chkiq(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
    304 {
    305 	struct dquot *dq;
    306 	int i;
    307 	int ncurinodes, error;
    308 
    309 	if ((error = getinoquota(ip)) != 0)
    310 		return error;
    311 	if (change == 0)
    312 		return (0);
    313 	if (change < 0) {
    314 		for (i = 0; i < MAXQUOTAS; i++) {
    315 			if ((dq = ip->i_dquot[i]) == NODQUOT)
    316 				continue;
    317 			mutex_enter(&dq->dq_interlock);
    318 			ncurinodes = dq->dq_curinodes + change;
    319 			if (ncurinodes >= 0)
    320 				dq->dq_curinodes = ncurinodes;
    321 			else
    322 				dq->dq_curinodes = 0;
    323 			dq->dq_flags &= ~DQ_INODS;
    324 			dq->dq_flags |= DQ_MOD;
    325 			mutex_exit(&dq->dq_interlock);
    326 		}
    327 		return (0);
    328 	}
    329 	if ((flags & FORCE) == 0 && kauth_authorize_generic(cred,
    330 	    KAUTH_GENERIC_ISSUSER, NULL) != 0) {
    331 		for (i = 0; i < MAXQUOTAS; i++) {
    332 			if ((dq = ip->i_dquot[i]) == NODQUOT)
    333 				continue;
    334 			mutex_enter(&dq->dq_interlock);
    335 			error = chkiqchg(ip, change, cred, i);
    336 			mutex_exit(&dq->dq_interlock);
    337 			if (error != 0)
    338 				return (error);
    339 		}
    340 	}
    341 	for (i = 0; i < MAXQUOTAS; i++) {
    342 		if ((dq = ip->i_dquot[i]) == NODQUOT)
    343 			continue;
    344 		mutex_enter(&dq->dq_interlock);
    345 		dq->dq_curinodes += change;
    346 		dq->dq_flags |= DQ_MOD;
    347 		mutex_exit(&dq->dq_interlock);
    348 	}
    349 	return (0);
    350 }
    351 
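/*
 * Sketch of a typical chkiq() caller (assumed, not from this file): inode
 * creation charges one inode against the quota and gives it back with
 * FORCE if the create fails, mirroring the UFS create and remove paths.
 */
#if 0
static int
create_with_quota_example(struct inode *ip, kauth_cred_t cred)
{
	int error;

	if ((error = chkiq(ip, 1, cred, 0)) != 0)
		return error;			/* EDQUOT: no inodes left */
	error = 0;	/* ... finish setting up the new inode here ... */
	if (error != 0)
		(void)chkiq(ip, -1, cred, FORCE);	/* release the charge */
	return error;
}
#endif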
    352 /*
     353  * Check for a valid change to a user's allocation.
    354  * Issue an error message if appropriate.
    355  */
    356 static int
    357 chkiqchg(struct inode *ip, int32_t change, kauth_cred_t cred, int type)
    358 {
    359 	struct dquot *dq = ip->i_dquot[type];
    360 	long ncurinodes = dq->dq_curinodes + change;
    361 
    362 	KASSERT(mutex_owned(&dq->dq_interlock));
    363 	/*
    364 	 * If user would exceed their hard limit, disallow inode allocation.
    365 	 */
    366 	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
    367 		if ((dq->dq_flags & DQ_INODS) == 0 &&
    368 		    ip->i_uid == kauth_cred_geteuid(cred)) {
    369 			uprintf("\n%s: write failed, %s inode limit reached\n",
    370 			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    371 			    quotatypes[type]);
    372 			dq->dq_flags |= DQ_INODS;
    373 		}
    374 		return (EDQUOT);
    375 	}
    376 	/*
    377 	 * If user is over their soft limit for too long, disallow inode
    378 	 * allocation. Reset time limit as they cross their soft limit.
    379 	 */
    380 	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
    381 		if (dq->dq_curinodes < dq->dq_isoftlimit) {
    382 			dq->dq_itime = time_second + ip->i_ump->um_itime[type];
    383 			if (ip->i_uid == kauth_cred_geteuid(cred))
    384 				uprintf("\n%s: warning, %s %s\n",
    385 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    386 				    quotatypes[type], "inode quota exceeded");
    387 			return (0);
    388 		}
    389 		if (time_second > dq->dq_itime) {
    390 			if ((dq->dq_flags & DQ_INODS) == 0 &&
    391 			    ip->i_uid == kauth_cred_geteuid(cred)) {
    392 				uprintf("\n%s: write failed, %s %s\n",
    393 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
    394 				    quotatypes[type],
    395 				    "inode quota exceeded for too long");
    396 				dq->dq_flags |= DQ_INODS;
    397 			}
    398 			return (EDQUOT);
    399 		}
    400 	}
    401 	return (0);
    402 }
    403 
    404 /*
    405  * Code to process quotactl commands.
    406  */
    407 
    408 /*
    409  * Q_QUOTAON - set up a quota file for a particular file system.
    410  */
    411 int
    412 quotaon(struct lwp *l, struct mount *mp, int type, void *fname)
    413 {
    414 	struct ufsmount *ump = VFSTOUFS(mp);
    415 	struct vnode *vp, **vpp, *mvp;
    416 	struct dquot *dq;
    417 	int error;
    418 	struct nameidata nd;
    419 
    420 	vpp = &ump->um_quotas[type];
    421 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname);
    422 	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0)
    423 		return (error);
    424 	vp = nd.ni_vp;
    425 	VOP_UNLOCK(vp, 0);
    426 	if (vp->v_type != VREG) {
    427 		(void) vn_close(vp, FREAD|FWRITE, l->l_cred, l);
    428 		return (EACCES);
    429 	}
    430 	if (*vpp != vp)
    431 		quotaoff(l, mp, type);
    432 	mutex_enter(&dqlock);
    433 	while ((ump->um_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
    434 		cv_wait(&dqcv, &dqlock);
    435 	ump->um_qflags[type] |= QTF_OPENING;
    436 	mutex_exit(&dqlock);
    437 	mp->mnt_flag |= MNT_QUOTA;
    438 	vp->v_vflag |= VV_SYSTEM;	/* XXXSMP */
    439 	*vpp = vp;
    440 	/*
    441 	 * Save the credential of the process that turned on quotas.
    442 	 * Set up the time limits for this quota.
    443 	 */
    444 	kauth_cred_hold(l->l_cred);
    445 	ump->um_cred[type] = l->l_cred;
    446 	ump->um_btime[type] = MAX_DQ_TIME;
    447 	ump->um_itime[type] = MAX_IQ_TIME;
    448 	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
    449 		if (dq->dq_btime > 0)
    450 			ump->um_btime[type] = dq->dq_btime;
    451 		if (dq->dq_itime > 0)
    452 			ump->um_itime[type] = dq->dq_itime;
    453 		dqrele(NULLVP, dq);
    454 	}
    455 	/* Allocate a marker vnode. */
    456 	if ((mvp = vnalloc(mp)) == NULL) {
    457 		error = ENOMEM;
    458 		goto out;
    459 	}
    460 	/*
    461 	 * Search vnodes associated with this mount point,
    462 	 * adding references to quota file being opened.
    463 	 * NB: only need to add dquot's for inodes being modified.
    464 	 */
    465 	mutex_enter(&mntvnode_lock);
    466 again:
    467 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
    468 		vmark(mvp, vp);
    469 		mutex_enter(&vp->v_interlock);
    470 		if (vp->v_mount != mp || vismarker(vp) ||
    471 		    vp->v_type == VNON || vp->v_writecount == 0) {
    472 			mutex_exit(&vp->v_interlock);
    473 			continue;
    474 		}
    475 		mutex_exit(&mntvnode_lock);
    476 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
    477 			mutex_enter(&mntvnode_lock);
    478 			(void)vunmark(mvp);
    479 			goto again;
    480 		}
    481 		if ((error = getinoquota(VTOI(vp))) != 0) {
    482 			vput(vp);
    483 			mutex_enter(&mntvnode_lock);
    484 			(void)vunmark(mvp);
    485 			break;
    486 		}
    487 		vput(vp);
    488 		mutex_enter(&mntvnode_lock);
    489 	}
    490 	mutex_exit(&mntvnode_lock);
    491 	vnfree(mvp);
    492  out:
    493 	mutex_enter(&dqlock);
    494 	ump->um_qflags[type] &= ~QTF_OPENING;
    495 	cv_broadcast(&dqcv);
    496 	mutex_exit(&dqlock);
    497 	if (error)
    498 		quotaoff(l, mp, type);
    499 	return (error);
    500 }
    501 
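/*
 * Sketch of the userland side (assumed, not from this file): quotaon(8)
 * reaches this routine through the traditional quotactl(2) interface,
 * passing the mount point and the path of the quota file to enable.  The
 * mount point and file name below are examples only.
 */
#if 0
#include <ufs/ufs/quota.h>
#include <unistd.h>

int
quotaon_usage_example(void)
{
	char qfile[] = "/home/quota.user";

	/* Enable user quotas on /home, backed by /home/quota.user. */
	return quotactl("/home", QCMD(Q_QUOTAON, USRQUOTA), 0, qfile);
}
#endif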
    502 /*
    503  * Q_QUOTAOFF - turn off disk quotas for a filesystem.
    504  */
    505 int
    506 quotaoff(struct lwp *l, struct mount *mp, int type)
    507 {
    508 	struct vnode *vp;
    509 	struct vnode *qvp, *mvp;
    510 	struct ufsmount *ump = VFSTOUFS(mp);
    511 	struct dquot *dq;
    512 	struct inode *ip;
    513 	kauth_cred_t cred;
    514 	int i, error;
    515 
    516 	/* Allocate a marker vnode. */
    517 	if ((mvp = vnalloc(mp)) == NULL)
    518 		return ENOMEM;
    519 
    520 	mutex_enter(&dqlock);
    521 	while ((ump->um_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
    522 		cv_wait(&dqcv, &dqlock);
    523 	if ((qvp = ump->um_quotas[type]) == NULLVP) {
    524 		mutex_exit(&dqlock);
    525 		vnfree(mvp);
    526 		return (0);
    527 	}
    528 	ump->um_qflags[type] |= QTF_CLOSING;
    529 	mutex_exit(&dqlock);
    530 	/*
    531 	 * Search vnodes associated with this mount point,
    532 	 * deleting any references to quota file being closed.
    533 	 */
    534 	mutex_enter(&mntvnode_lock);
    535 again:
    536 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
    537 		vmark(mvp, vp);
    538 		mutex_enter(&vp->v_interlock);
    539 		if (vp->v_mount != mp || vismarker(vp) || vp->v_type == VNON ||
    540 		    (vp->v_iflag & VI_CLEAN) != 0) {
    541 			mutex_exit(&vp->v_interlock);
    542 			continue;
    543 		}
    544 		mutex_exit(&mntvnode_lock);
    545 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
    546 			mutex_enter(&mntvnode_lock);
    547 			(void)vunmark(mvp);
    548 			goto again;
    549 		}
    550 		ip = VTOI(vp);
    551 		dq = ip->i_dquot[type];
    552 		ip->i_dquot[type] = NODQUOT;
    553 		dqrele(vp, dq);
    554 		vput(vp);
    555 		mutex_enter(&mntvnode_lock);
    556 	}
    557 	mutex_exit(&mntvnode_lock);
    558 #ifdef DIAGNOSTIC
    559 	dqflush(qvp);
    560 #endif
    561 	qvp->v_vflag &= ~VV_SYSTEM;
    562 	error = vn_close(qvp, FREAD|FWRITE, l->l_cred, l);
    563 	mutex_enter(&dqlock);
    564 	ump->um_quotas[type] = NULLVP;
    565 	cred = ump->um_cred[type];
    566 	ump->um_cred[type] = NOCRED;
    567 	for (i = 0; i < MAXQUOTAS; i++)
    568 		if (ump->um_quotas[i] != NULLVP)
    569 			break;
    570 	ump->um_qflags[type] &= ~QTF_CLOSING;
    571 	cv_broadcast(&dqcv);
    572 	mutex_exit(&dqlock);
    573 	kauth_cred_free(cred);
    574 	if (i == MAXQUOTAS)
    575 		mp->mnt_flag &= ~MNT_QUOTA;
    576 	return (error);
    577 }
    578 
    579 /*
    580  * Q_GETQUOTA - return current values in a dqblk structure.
    581  */
    582 int
    583 getquota(struct mount *mp, u_long id, int type, void *addr)
    584 {
    585 	struct dquot *dq;
    586 	int error;
    587 
    588 	if ((error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq)) != 0)
    589 		return (error);
    590 	error = copyout((void *)&dq->dq_dqb, addr, sizeof (struct dqblk));
    591 	dqrele(NULLVP, dq);
    592 	return (error);
    593 }
    594 
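/*
 * Sketch of a Q_GETQUOTA caller (assumed userland code, not from this
 * file): the struct dqblk copied out above carries both the limits and
 * the current usage for the given id.
 */
#if 0
#include <stdio.h>
#include <ufs/ufs/quota.h>
#include <unistd.h>

int
getquota_usage_example(uid_t uid)
{
	struct dqblk dqb;

	if (quotactl("/home", QCMD(Q_GETQUOTA, USRQUOTA), uid, &dqb) == -1)
		return -1;
	printf("%u of %u blocks, %u of %u inodes\n",
	    (unsigned)dqb.dqb_curblocks, (unsigned)dqb.dqb_bhardlimit,
	    (unsigned)dqb.dqb_curinodes, (unsigned)dqb.dqb_ihardlimit);
	return 0;
}
#endif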
    595 /*
    596  * Q_SETQUOTA - assign an entire dqblk structure.
    597  */
    598 int
    599 setquota(struct mount *mp, u_long id, int type, void *addr)
    600 {
    601 	struct dquot *dq;
    602 	struct dquot *ndq;
    603 	struct ufsmount *ump = VFSTOUFS(mp);
    604 	struct dqblk newlim;
    605 	int error;
    606 
    607 	error = copyin(addr, (void *)&newlim, sizeof (struct dqblk));
    608 	if (error)
    609 		return (error);
    610 	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
    611 		return (error);
    612 	dq = ndq;
    613 	mutex_enter(&dq->dq_interlock);
    614 	/*
    615 	 * Copy all but the current values.
    616 	 * Reset time limit if previously had no soft limit or were
    617 	 * under it, but now have a soft limit and are over it.
    618 	 */
    619 	newlim.dqb_curblocks = dq->dq_curblocks;
    620 	newlim.dqb_curinodes = dq->dq_curinodes;
    621 	if (dq->dq_id != 0) {
    622 		newlim.dqb_btime = dq->dq_btime;
    623 		newlim.dqb_itime = dq->dq_itime;
    624 	}
    625 	if (newlim.dqb_bsoftlimit &&
    626 	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
    627 	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
    628 		newlim.dqb_btime = time_second + ump->um_btime[type];
    629 	if (newlim.dqb_isoftlimit &&
    630 	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
    631 	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
    632 		newlim.dqb_itime = time_second + ump->um_itime[type];
    633 	dq->dq_dqb = newlim;
    634 	if (dq->dq_curblocks < dq->dq_bsoftlimit)
    635 		dq->dq_flags &= ~DQ_BLKS;
    636 	if (dq->dq_curinodes < dq->dq_isoftlimit)
    637 		dq->dq_flags &= ~DQ_INODS;
    638 	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
    639 	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
    640 		dq->dq_flags |= DQ_FAKE;
    641 	else
    642 		dq->dq_flags &= ~DQ_FAKE;
    643 	dq->dq_flags |= DQ_MOD;
    644 	mutex_exit(&dq->dq_interlock);
    645 	dqrele(NULLVP, dq);
    646 	return (0);
    647 }
    648 
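/*
 * Sketch of a Q_SETQUOTA caller (assumed userland code, not from this
 * file): an edquota(8)-style tool fills a struct dqblk and hands it to
 * quotactl(2).  The current-usage fields can be left zero, since the code
 * above replaces them with the in-core values anyway.  The limits below
 * are example numbers.
 */
#if 0
#include <string.h>
#include <ufs/ufs/quota.h>
#include <unistd.h>

int
setquota_usage_example(uid_t uid)
{
	struct dqblk dqb;

	memset(&dqb, 0, sizeof(dqb));
	dqb.dqb_bsoftlimit = 100000;		/* DEV_BSIZE blocks */
	dqb.dqb_bhardlimit = 120000;
	dqb.dqb_isoftlimit = 1000;		/* inodes */
	dqb.dqb_ihardlimit = 1200;
	return quotactl("/home", QCMD(Q_SETQUOTA, USRQUOTA), uid, &dqb);
}
#endif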
    649 /*
    650  * Q_SETUSE - set current inode and block usage.
    651  */
    652 int
    653 setuse(struct mount *mp, u_long id, int type, void *addr)
    654 {
    655 	struct dquot *dq;
    656 	struct ufsmount *ump = VFSTOUFS(mp);
    657 	struct dquot *ndq;
    658 	struct dqblk usage;
    659 	int error;
    660 
    661 	error = copyin(addr, (void *)&usage, sizeof (struct dqblk));
    662 	if (error)
    663 		return (error);
    664 	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
    665 		return (error);
    666 	dq = ndq;
    667 	mutex_enter(&dq->dq_interlock);
    668 	/*
    669 	 * Reset time limit if have a soft limit and were
    670 	 * previously under it, but are now over it.
    671 	 */
    672 	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
    673 	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
    674 		dq->dq_btime = time_second + ump->um_btime[type];
    675 	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
    676 	    usage.dqb_curinodes >= dq->dq_isoftlimit)
    677 		dq->dq_itime = time_second + ump->um_itime[type];
    678 	dq->dq_curblocks = usage.dqb_curblocks;
    679 	dq->dq_curinodes = usage.dqb_curinodes;
    680 	if (dq->dq_curblocks < dq->dq_bsoftlimit)
    681 		dq->dq_flags &= ~DQ_BLKS;
    682 	if (dq->dq_curinodes < dq->dq_isoftlimit)
    683 		dq->dq_flags &= ~DQ_INODS;
    684 	dq->dq_flags |= DQ_MOD;
    685 	mutex_exit(&dq->dq_interlock);
    686 	dqrele(NULLVP, dq);
    687 	return (0);
    688 }
    689 
    690 /*
    691  * Q_SYNC - sync quota files to disk.
    692  */
    693 int
    694 qsync(struct mount *mp)
    695 {
    696 	struct ufsmount *ump = VFSTOUFS(mp);
    697 	struct vnode *vp, *mvp;
    698 	struct dquot *dq;
    699 	int i, error;
    700 
    701 	/*
    702 	 * Check if the mount point has any quotas.
    703 	 * If not, simply return.
    704 	 */
    705 	for (i = 0; i < MAXQUOTAS; i++)
    706 		if (ump->um_quotas[i] != NULLVP)
    707 			break;
    708 	if (i == MAXQUOTAS)
    709 		return (0);
    710 
    711 	/* Allocate a marker vnode. */
    712 	if ((mvp = vnalloc(mp)) == NULL)
    713 		return (ENOMEM);
    714 
    715 	/*
    716 	 * Search vnodes associated with this mount point,
    717 	 * synchronizing any modified dquot structures.
    718 	 */
    719 	mutex_enter(&mntvnode_lock);
    720  again:
    721 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
    722 		vmark(mvp, vp);
    723 		mutex_enter(&vp->v_interlock);
    724 		if (vp->v_mount != mp || vismarker(vp) || vp->v_type == VNON) {
    725 			mutex_exit(&vp->v_interlock);
    726 			continue;
    727 		}
    728 		mutex_exit(&mntvnode_lock);
    729 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
    730 		if (error) {
    731 			mutex_enter(&mntvnode_lock);
    732 			if (error == ENOENT) {
    733 				(void)vunmark(mvp);
    734 				goto again;
    735 			}
    736 			continue;
    737 		}
    738 		for (i = 0; i < MAXQUOTAS; i++) {
    739 			dq = VTOI(vp)->i_dquot[i];
    740 			if (dq == NODQUOT)
    741 				continue;
    742 			mutex_enter(&dq->dq_interlock);
    743 			if (dq->dq_flags & DQ_MOD)
    744 				dqsync(vp, dq);
    745 			mutex_exit(&dq->dq_interlock);
    746 		}
    747 		vput(vp);
    748 		mutex_enter(&mntvnode_lock);
    749 	}
    750 	mutex_exit(&mntvnode_lock);
    751 	vnfree(mvp);
    752 	return (0);
    753 }
    754 
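/*
 * Sketch of a Q_SYNC caller (assumed userland code, not from this file):
 * the id and address arguments are unused, the call simply pushes any
 * modified in-core dquots for the file system out to the quota file.
 */
#if 0
#include <stddef.h>
#include <ufs/ufs/quota.h>
#include <unistd.h>

int
qsync_usage_example(void)
{

	return quotactl("/home", QCMD(Q_SYNC, USRQUOTA), 0, NULL);
}
#endif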
    755 /*
    756  * Code pertaining to management of the in-core dquot data structures.
    757  */
    758 #define DQHASH(dqvp, id) \
    759 	(((((long)(dqvp)) >> 8) + id) & dqhash)
    760 static LIST_HEAD(dqhashhead, dquot) *dqhashtbl;
    761 static u_long dqhash;
    762 static pool_cache_t dquot_cache;
    763 
    764 MALLOC_JUSTDEFINE(M_DQUOT, "UFS quota", "UFS quota entries");
    765 
    766 /*
    767  * Initialize the quota system.
    768  */
    769 void
    770 dqinit(void)
    771 {
    772 
    773 	mutex_init(&dqlock, MUTEX_DEFAULT, IPL_NONE);
    774 	cv_init(&dqcv, "quota");
    775 	malloc_type_attach(M_DQUOT);
    776 	dqhashtbl =
    777 	    hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &dqhash);
    778 	dquot_cache = pool_cache_init(sizeof(struct dquot), 0, 0, 0, "ufsdq",
    779 	    NULL, IPL_NONE, NULL, NULL, NULL);
    780 }
    781 
    782 void
    783 dqreinit(void)
    784 {
    785 	struct dquot *dq;
    786 	struct dqhashhead *oldhash, *hash;
    787 	struct vnode *dqvp;
    788 	u_long oldmask, mask, hashval;
    789 	int i;
    790 
    791 	hash = hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &mask);
    792 	mutex_enter(&dqlock);
    793 	oldhash = dqhashtbl;
    794 	oldmask = dqhash;
    795 	dqhashtbl = hash;
    796 	dqhash = mask;
    797 	for (i = 0; i <= oldmask; i++) {
    798 		while ((dq = LIST_FIRST(&oldhash[i])) != NULL) {
    799 			dqvp = dq->dq_ump->um_quotas[dq->dq_type];
    800 			LIST_REMOVE(dq, dq_hash);
    801 			hashval = DQHASH(dqvp, dq->dq_id);
    802 			LIST_INSERT_HEAD(&dqhashtbl[hashval], dq, dq_hash);
    803 		}
    804 	}
    805 	mutex_exit(&dqlock);
    806 	hashdone(oldhash, M_DQUOT);
    807 }
    808 
    809 /*
    810  * Free resources held by quota system.
    811  */
    812 void
    813 dqdone(void)
    814 {
    815 
    816 	pool_cache_destroy(dquot_cache);
    817 	hashdone(dqhashtbl, M_DQUOT);
    818 	malloc_type_detach(M_DQUOT);
    819 	cv_destroy(&dqcv);
    820 	mutex_destroy(&dqlock);
    821 }
    822 
    823 /*
     824  * Obtain a dquot structure for the specified identifier and quota file,
    825  * reading the information from the file if necessary.
    826  */
    827 static int
    828 dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    829     struct dquot **dqp)
    830 {
    831 	struct dquot *dq, *ndq;
    832 	struct dqhashhead *dqh;
    833 	struct vnode *dqvp;
    834 	struct iovec aiov;
    835 	struct uio auio;
    836 	int error;
    837 
    838 	/* Lock to see an up to date value for QTF_CLOSING. */
    839 	mutex_enter(&dqlock);
    840 	dqvp = ump->um_quotas[type];
    841 	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
    842 		mutex_exit(&dqlock);
    843 		*dqp = NODQUOT;
    844 		return (EINVAL);
    845 	}
    846 	KASSERT(dqvp != vp);
    847 	/*
    848 	 * Check the cache first.
    849 	 */
    850 	dqh = &dqhashtbl[DQHASH(dqvp, id)];
    851 	LIST_FOREACH(dq, dqh, dq_hash) {
    852 		if (dq->dq_id != id ||
    853 		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
    854 			continue;
    855 		KASSERT(dq->dq_cnt > 0);
    856 		dqref(dq);
    857 		mutex_exit(&dqlock);
    858 		*dqp = dq;
    859 		return (0);
    860 	}
    861 	/*
    862 	 * Not in cache, allocate a new one.
    863 	 */
    864 	mutex_exit(&dqlock);
    865 	ndq = pool_cache_get(dquot_cache, PR_WAITOK);
    866 	/*
    867 	 * Initialize the contents of the dquot structure.
    868 	 */
    869 	memset((char *)ndq, 0, sizeof *ndq);
    870 	ndq->dq_flags = 0;
    871 	ndq->dq_id = id;
    872 	ndq->dq_ump = ump;
    873 	ndq->dq_type = type;
    874 	mutex_init(&ndq->dq_interlock, MUTEX_DEFAULT, IPL_NONE);
    875 	mutex_enter(&dqlock);
    876 	dqh = &dqhashtbl[DQHASH(dqvp, id)];
    877 	LIST_FOREACH(dq, dqh, dq_hash) {
    878 		if (dq->dq_id != id ||
    879 		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
    880 			continue;
    881 		/*
     882 		 * Another thread beat us to allocating this dquot.
    883 		 */
    884 		KASSERT(dq->dq_cnt > 0);
    885 		dqref(dq);
    886 		mutex_exit(&dqlock);
    887 		pool_cache_put(dquot_cache, ndq);
    888 		*dqp = dq;
    889 		return 0;
    890 	}
    891 	dq = ndq;
    892 	LIST_INSERT_HEAD(dqh, dq, dq_hash);
    893 	dqref(dq);
    894 	mutex_enter(&dq->dq_interlock);
    895 	mutex_exit(&dqlock);
    896 	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
    897 	auio.uio_iov = &aiov;
    898 	auio.uio_iovcnt = 1;
    899 	aiov.iov_base = (void *)&dq->dq_dqb;
    900 	aiov.iov_len = sizeof (struct dqblk);
    901 	auio.uio_resid = sizeof (struct dqblk);
    902 	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
    903 	auio.uio_rw = UIO_READ;
    904 	UIO_SETUP_SYSSPACE(&auio);
    905 	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
    906 	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
    907 		memset((void *)&dq->dq_dqb, 0, sizeof(struct dqblk));
    908 	VOP_UNLOCK(dqvp, 0);
    909 	/*
    910 	 * I/O error in reading quota file, release
    911 	 * quota structure and reflect problem to caller.
    912 	 */
    913 	if (error) {
    914 		mutex_enter(&dqlock);
    915 		LIST_REMOVE(dq, dq_hash);
    916 		mutex_exit(&dqlock);
    917 		mutex_exit(&dq->dq_interlock);
    918 		dqrele(vp, dq);
    919 		*dqp = NODQUOT;
    920 		return (error);
    921 	}
    922 	/*
    923 	 * Check for no limit to enforce.
    924 	 * Initialize time values if necessary.
    925 	 */
    926 	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
    927 	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
    928 		dq->dq_flags |= DQ_FAKE;
    929 	if (dq->dq_id != 0) {
    930 		if (dq->dq_btime == 0)
    931 			dq->dq_btime = time_second + ump->um_btime[type];
    932 		if (dq->dq_itime == 0)
    933 			dq->dq_itime = time_second + ump->um_itime[type];
    934 	}
    935 	mutex_exit(&dq->dq_interlock);
    936 	*dqp = dq;
    937 	return (0);
    938 }
    939 
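/*
 * The cache lookup in dqget() follows a common pattern: search the hash
 * chain under dqlock, drop the lock to allocate (pool_cache_get() may
 * sleep), then search again before inserting in case another thread
 * installed the same entry in the meantime.  A condensed sketch of that
 * pattern, with the quota-file vnode check omitted for brevity:
 */
#if 0
static struct dquot *
dq_lookup_or_insert_example(struct dqhashhead *dqh, u_long id)
{
	struct dquot *dq, *ndq;

	mutex_enter(&dqlock);
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id == id) {
			dqref(dq);
			mutex_exit(&dqlock);
			return dq;			/* cache hit */
		}
	}
	mutex_exit(&dqlock);
	ndq = pool_cache_get(dquot_cache, PR_WAITOK);	/* may sleep */
	memset(ndq, 0, sizeof(*ndq));
	ndq->dq_id = id;
	mutex_init(&ndq->dq_interlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_enter(&dqlock);
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id == id) {			/* lost the race */
			dqref(dq);
			mutex_exit(&dqlock);
			mutex_destroy(&ndq->dq_interlock);
			pool_cache_put(dquot_cache, ndq);
			return dq;
		}
	}
	LIST_INSERT_HEAD(dqh, ndq, dq_hash);
	dqref(ndq);
	mutex_exit(&dqlock);
	return ndq;
}
#endif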
    940 /*
    941  * Obtain a reference to a dquot.
    942  */
    943 static void
    944 dqref(struct dquot *dq)
    945 {
    946 
    947 	KASSERT(mutex_owned(&dqlock));
    948 	dq->dq_cnt++;
    949 	KASSERT(dq->dq_cnt > 0);
    950 }
    951 
    952 /*
    953  * Release a reference to a dquot.
    954  */
    955 static void
    956 dqrele(struct vnode *vp, struct dquot *dq)
    957 {
    958 
    959 	if (dq == NODQUOT)
    960 		return;
    961 	mutex_enter(&dq->dq_interlock);
    962 	for (;;) {
    963 		mutex_enter(&dqlock);
    964 		if (dq->dq_cnt > 1) {
    965 			dq->dq_cnt--;
    966 			mutex_exit(&dqlock);
    967 			mutex_exit(&dq->dq_interlock);
    968 			return;
    969 		}
    970 		if ((dq->dq_flags & DQ_MOD) == 0)
    971 			break;
    972 		mutex_exit(&dqlock);
    973 		(void) dqsync(vp, dq);
    974 	}
    975 	KASSERT(dq->dq_cnt == 1 && (dq->dq_flags & DQ_MOD) == 0);
    976 	LIST_REMOVE(dq, dq_hash);
    977 	mutex_exit(&dqlock);
    978 	mutex_exit(&dq->dq_interlock);
    979 	mutex_destroy(&dq->dq_interlock);
    980 	pool_cache_put(dquot_cache, dq);
    981 }
    982 
    983 /*
    984  * Update the disk quota in the quota file.
    985  */
    986 static int
    987 dqsync(struct vnode *vp, struct dquot *dq)
    988 {
    989 	struct vnode *dqvp;
    990 	struct iovec aiov;
    991 	struct uio auio;
    992 	int error;
    993 
    994 	if (dq == NODQUOT)
    995 		panic("dqsync: dquot");
    996 	KASSERT(mutex_owned(&dq->dq_interlock));
    997 	if ((dq->dq_flags & DQ_MOD) == 0)
    998 		return (0);
    999 	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
   1000 		panic("dqsync: file");
   1001 	KASSERT(dqvp != vp);
   1002 	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
   1003 	auio.uio_iov = &aiov;
   1004 	auio.uio_iovcnt = 1;
   1005 	aiov.iov_base = (void *)&dq->dq_dqb;
   1006 	aiov.iov_len = sizeof (struct dqblk);
   1007 	auio.uio_resid = sizeof (struct dqblk);
   1008 	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
   1009 	auio.uio_rw = UIO_WRITE;
   1010 	UIO_SETUP_SYSSPACE(&auio);
   1011 	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
   1012 	if (auio.uio_resid && error == 0)
   1013 		error = EIO;
   1014 	dq->dq_flags &= ~DQ_MOD;
   1015 	VOP_UNLOCK(dqvp, 0);
   1016 	return (error);
   1017 }
   1018 
   1019 #ifdef DIAGNOSTIC
   1020 /*
   1021  * Check the hash chains for stray dquot's.
   1022  */
   1023 static void
   1024 dqflush(struct vnode *vp)
   1025 {
   1026 	struct dquot *dq;
   1027 	int i;
   1028 
   1029 	mutex_enter(&dqlock);
   1030 	for (i = 0; i <= dqhash; i++)
   1031 		LIST_FOREACH(dq, &dqhashtbl[i], dq_hash)
   1032 			KASSERT(dq->dq_ump->um_quotas[dq->dq_type] != vp);
   1033 	mutex_exit(&dqlock);
   1034 }
   1035 #endif
   1036