/* $NetBSD: ufs_quota.c,v 1.22 2001/11/08 02:39:17 lukem Exp $ */

/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ufs_quota.c,v 1.22 2001/11/08 02:39:17 lukem Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

/*
 * Quota name to error message mapping.
 */
static char *quotatypes[] = INITQFNAMES;

/*
 * Set up the quotas for an inode.
 *
 * This routine completely defines the semantics of quotas.
 * If other criteria are to be used to establish quotas, the
 * MAXQUOTAS value in quota.h should be increased, and the
 * additional dquots set up here.
 */
int
getinoquota(ip)
        struct inode *ip;
{
        struct ufsmount *ump;
        struct vnode *vp = ITOV(ip);
        int error;

        ump = VFSTOUFS(vp->v_mount);
        /*
         * Set up the user quota based on file uid.
         * EINVAL means that quotas are not enabled.
         */
        if (ip->i_dquot[USRQUOTA] == NODQUOT &&
            (error =
                dqget(vp, ip->i_ffs_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
            error != EINVAL)
                return (error);
        /*
         * Set up the group quota based on file gid.
         * EINVAL means that quotas are not enabled.
         */
        if (ip->i_dquot[GRPQUOTA] == NODQUOT &&
            (error =
                dqget(vp, ip->i_ffs_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
            error != EINVAL)
                return (error);
        return (0);
}

/*
 * Update disk usage, and take corrective action.
 */
int
chkdq(ip, change, cred, flags)
        struct inode *ip;
        long change;
        struct ucred *cred;
        int flags;
{
        struct dquot *dq;
        int i;
        int ncurblocks, error;

#ifdef DIAGNOSTIC
        if ((flags & CHOWN) == 0)
                chkdquot(ip);
#endif
        if (change == 0)
                return (0);
        if (change < 0) {
                for (i = 0; i < MAXQUOTAS; i++) {
                        if ((dq = ip->i_dquot[i]) == NODQUOT)
                                continue;
                        while (dq->dq_flags & DQ_LOCK) {
                                dq->dq_flags |= DQ_WANT;
                                (void) tsleep(dq, PINOD+1, "chkdq", 0);
                        }
                        ncurblocks = dq->dq_curblocks + change;
                        if (ncurblocks >= 0)
                                dq->dq_curblocks = ncurblocks;
                        else
                                dq->dq_curblocks = 0;
                        dq->dq_flags &= ~DQ_BLKS;
                        dq->dq_flags |= DQ_MOD;
                }
                return (0);
        }
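        /*
         * Allocating.  The limit checks below are skipped for forced
         * changes and for the superuser (or kernel, NOCRED, credentials);
         * everyone else must pass chkdqchg() for each active quota type.
         */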
        if ((flags & FORCE) == 0 &&
            (cred != NOCRED && cred->cr_uid != 0)) {
                for (i = 0; i < MAXQUOTAS; i++) {
                        if ((dq = ip->i_dquot[i]) == NODQUOT)
                                continue;
                        if ((error = chkdqchg(ip, change, cred, i)) != 0)
                                return (error);
                }
        }
        for (i = 0; i < MAXQUOTAS; i++) {
                if ((dq = ip->i_dquot[i]) == NODQUOT)
                        continue;
                while (dq->dq_flags & DQ_LOCK) {
                        dq->dq_flags |= DQ_WANT;
                        (void) tsleep(dq, PINOD+1, "chkdq", 0);
                }
                dq->dq_curblocks += change;
                dq->dq_flags |= DQ_MOD;
        }
        return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkdqchg(ip, change, cred, type)
        struct inode *ip;
        long change;
        struct ucred *cred;
        int type;
{
        struct dquot *dq = ip->i_dquot[type];
        long ncurblocks = dq->dq_curblocks + change;

        /*
         * If user would exceed their hard limit, disallow space allocation.
         */
        if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
                if ((dq->dq_flags & DQ_BLKS) == 0 &&
                    ip->i_ffs_uid == cred->cr_uid) {
                        uprintf("\n%s: write failed, %s disk limit reached\n",
                            ITOV(ip)->v_mount->mnt_stat.f_mntonname,
                            quotatypes[type]);
                        dq->dq_flags |= DQ_BLKS;
                }
                return (EDQUOT);
        }
        /*
         * If user is over their soft limit for too long, disallow space
         * allocation. Reset time limit as they cross their soft limit.
         */
        if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
                if (dq->dq_curblocks < dq->dq_bsoftlimit) {
                        dq->dq_btime = time.tv_sec +
                            VFSTOUFS(ITOV(ip)->v_mount)->um_btime[type];
                        if (ip->i_ffs_uid == cred->cr_uid)
                                uprintf("\n%s: warning, %s %s\n",
                                    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
                                    quotatypes[type], "disk quota exceeded");
                        return (0);
                }
                if (time.tv_sec > dq->dq_btime) {
                        if ((dq->dq_flags & DQ_BLKS) == 0 &&
                            ip->i_ffs_uid == cred->cr_uid) {
                                uprintf("\n%s: write failed, %s %s\n",
                                    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
                                    quotatypes[type],
                                    "disk quota exceeded for too long");
                                dq->dq_flags |= DQ_BLKS;
                        }
                        return (EDQUOT);
                }
        }
        return (0);
}

/*
 * Check the inode limit, applying corrective action.
 */
int
chkiq(ip, change, cred, flags)
        struct inode *ip;
        long change;
        struct ucred *cred;
        int flags;
{
        struct dquot *dq;
        int i;
        int ncurinodes, error;

#ifdef DIAGNOSTIC
        if ((flags & CHOWN) == 0)
                chkdquot(ip);
#endif
        if (change == 0)
                return (0);
        if (change < 0) {
                for (i = 0; i < MAXQUOTAS; i++) {
                        if ((dq = ip->i_dquot[i]) == NODQUOT)
                                continue;
                        while (dq->dq_flags & DQ_LOCK) {
                                dq->dq_flags |= DQ_WANT;
                                (void) tsleep(dq, PINOD+1, "chkiq", 0);
                        }
                        ncurinodes = dq->dq_curinodes + change;
                        if (ncurinodes >= 0)
                                dq->dq_curinodes = ncurinodes;
                        else
                                dq->dq_curinodes = 0;
                        dq->dq_flags &= ~DQ_INODS;
                        dq->dq_flags |= DQ_MOD;
                }
                return (0);
        }
        if ((flags & FORCE) == 0 && cred->cr_uid != 0) {
                for (i = 0; i < MAXQUOTAS; i++) {
                        if ((dq = ip->i_dquot[i]) == NODQUOT)
                                continue;
                        if ((error = chkiqchg(ip, change, cred, i)) != 0)
                                return (error);
                }
        }
        for (i = 0; i < MAXQUOTAS; i++) {
                if ((dq = ip->i_dquot[i]) == NODQUOT)
                        continue;
                while (dq->dq_flags & DQ_LOCK) {
                        dq->dq_flags |= DQ_WANT;
                        (void) tsleep(dq, PINOD+1, "chkiq", 0);
                }
                dq->dq_curinodes += change;
                dq->dq_flags |= DQ_MOD;
        }
        return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkiqchg(ip, change, cred, type)
        struct inode *ip;
        long change;
        struct ucred *cred;
        int type;
{
        struct dquot *dq = ip->i_dquot[type];
        long ncurinodes = dq->dq_curinodes + change;

        /*
         * If user would exceed their hard limit, disallow inode allocation.
         */
        if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
                if ((dq->dq_flags & DQ_INODS) == 0 &&
                    ip->i_ffs_uid == cred->cr_uid) {
                        uprintf("\n%s: write failed, %s inode limit reached\n",
                            ITOV(ip)->v_mount->mnt_stat.f_mntonname,
                            quotatypes[type]);
                        dq->dq_flags |= DQ_INODS;
                }
                return (EDQUOT);
        }
        /*
         * If user is over their soft limit for too long, disallow inode
         * allocation. Reset time limit as they cross their soft limit.
         */
        if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
                if (dq->dq_curinodes < dq->dq_isoftlimit) {
                        dq->dq_itime = time.tv_sec +
                            VFSTOUFS(ITOV(ip)->v_mount)->um_itime[type];
                        if (ip->i_ffs_uid == cred->cr_uid)
                                uprintf("\n%s: warning, %s %s\n",
                                    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
                                    quotatypes[type], "inode quota exceeded");
                        return (0);
                }
                if (time.tv_sec > dq->dq_itime) {
                        if ((dq->dq_flags & DQ_INODS) == 0 &&
                            ip->i_ffs_uid == cred->cr_uid) {
                                uprintf("\n%s: write failed, %s %s\n",
                                    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
                                    quotatypes[type],
                                    "inode quota exceeded for too long");
                                dq->dq_flags |= DQ_INODS;
                        }
                        return (EDQUOT);
                }
        }
        return (0);
}

#ifdef DIAGNOSTIC
/*
 * On filesystems with quotas enabled, it is an error for a file to change
 * size and not to have a dquot structure associated with it.
 */
void
chkdquot(ip)
        struct inode *ip;
{
        struct ufsmount *ump = VFSTOUFS(ITOV(ip)->v_mount);
        int i;

        for (i = 0; i < MAXQUOTAS; i++) {
                if (ump->um_quotas[i] == NULLVP ||
                    (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
                        continue;
                if (ip->i_dquot[i] == NODQUOT) {
                        vprint("chkdquot: missing dquot", ITOV(ip));
                        panic("missing dquot");
                }
        }
}
#endif

/*
 * Code to process quotactl commands.
 */

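/*
 * The routines below are normally reached via the quotactl(2) system call,
 * through the filesystem's VFS quotactl hook, which dispatches on the Q_*
 * command.
 */
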
/*
 * Q_QUOTAON - set up a quota file for a particular file system.
 */
int
quotaon(p, mp, type, fname)
        struct proc *p;
        struct mount *mp;
        int type;
        caddr_t fname;
{
        struct ufsmount *ump = VFSTOUFS(mp);
        struct vnode *vp, **vpp;
        struct vnode *nextvp;
        struct dquot *dq;
        int error;
        struct nameidata nd;

        vpp = &ump->um_quotas[type];
        NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, p);
        if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0)
                return (error);
        vp = nd.ni_vp;
        VOP_UNLOCK(vp, 0);
        if (vp->v_type != VREG) {
                (void) vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
                return (EACCES);
        }
        if (*vpp != vp)
                quotaoff(p, mp, type);
        ump->um_qflags[type] |= QTF_OPENING;
        mp->mnt_flag |= MNT_QUOTA;
        vp->v_flag |= VSYSTEM;
        *vpp = vp;
        /*
         * Save the credential of the process that turned on quotas.
         * Set up the time limits for this quota.
         */
        crhold(p->p_ucred);
        ump->um_cred[type] = p->p_ucred;
        ump->um_btime[type] = MAX_DQ_TIME;
        ump->um_itime[type] = MAX_IQ_TIME;
        if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
                if (dq->dq_btime > 0)
                        ump->um_btime[type] = dq->dq_btime;
                if (dq->dq_itime > 0)
                        ump->um_itime[type] = dq->dq_itime;
                dqrele(NULLVP, dq);
        }
        /*
         * Search vnodes associated with this mount point,
         * adding references to quota file being opened.
         * NB: only need to add dquot's for inodes being modified.
         */
again:
        for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
                nextvp = LIST_NEXT(vp, v_mntvnodes);
                if (vp->v_type == VNON || vp->v_writecount == 0)
                        continue;
                if (vget(vp, LK_EXCLUSIVE))
                        goto again;
                if ((error = getinoquota(VTOI(vp))) != 0) {
                        vput(vp);
                        break;
                }
                vput(vp);
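                /*
                 * vget() and getinoquota() may have slept; if the mount's
                 * vnode list changed underneath us, rescan it from the
                 * start.
                 */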
                if (LIST_NEXT(vp, v_mntvnodes) != nextvp || vp->v_mount != mp)
                        goto again;
        }
        ump->um_qflags[type] &= ~QTF_OPENING;
        if (error)
                quotaoff(p, mp, type);
        return (error);
}

/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 */
int
quotaoff(p, mp, type)
        struct proc *p;
        struct mount *mp;
        int type;
{
        struct vnode *vp;
        struct vnode *qvp, *nextvp;
        struct ufsmount *ump = VFSTOUFS(mp);
        struct dquot *dq;
        struct inode *ip;
        int error;

        if ((qvp = ump->um_quotas[type]) == NULLVP)
                return (0);
        ump->um_qflags[type] |= QTF_CLOSING;
        /*
         * Search vnodes associated with this mount point,
         * deleting any references to quota file being closed.
         */
again:
        for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
                nextvp = LIST_NEXT(vp, v_mntvnodes);
                if (vp->v_type == VNON)
                        continue;
                if (vget(vp, LK_EXCLUSIVE))
                        goto again;
                ip = VTOI(vp);
                dq = ip->i_dquot[type];
                ip->i_dquot[type] = NODQUOT;
                dqrele(vp, dq);
                vput(vp);
                if (LIST_NEXT(vp, v_mntvnodes) != nextvp || vp->v_mount != mp)
                        goto again;
        }
        dqflush(qvp);
        qvp->v_flag &= ~VSYSTEM;
        error = vn_close(qvp, FREAD|FWRITE, p->p_ucred, p);
        ump->um_quotas[type] = NULLVP;
        crfree(ump->um_cred[type]);
        ump->um_cred[type] = NOCRED;
        ump->um_qflags[type] &= ~QTF_CLOSING;
        for (type = 0; type < MAXQUOTAS; type++)
                if (ump->um_quotas[type] != NULLVP)
                        break;
        if (type == MAXQUOTAS)
                mp->mnt_flag &= ~MNT_QUOTA;
        return (error);
}

/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 */
int
getquota(mp, id, type, addr)
        struct mount *mp;
        u_long id;
        int type;
        caddr_t addr;
{
        struct dquot *dq;
        int error;

        if ((error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq)) != 0)
                return (error);
        error = copyout((caddr_t)&dq->dq_dqb, addr, sizeof (struct dqblk));
        dqrele(NULLVP, dq);
        return (error);
}

/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
int
setquota(mp, id, type, addr)
        struct mount *mp;
        u_long id;
        int type;
        caddr_t addr;
{
        struct dquot *dq;
        struct dquot *ndq;
        struct ufsmount *ump = VFSTOUFS(mp);
        struct dqblk newlim;
        int error;

        error = copyin(addr, (caddr_t)&newlim, sizeof (struct dqblk));
        if (error)
                return (error);
        if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
                return (error);
        dq = ndq;
        while (dq->dq_flags & DQ_LOCK) {
                dq->dq_flags |= DQ_WANT;
                (void) tsleep(dq, PINOD+1, "setquota", 0);
        }
        /*
         * Copy all but the current values.
         * Reset time limit if previously had no soft limit or were
         * under it, but now have a soft limit and are over it.
         */
        newlim.dqb_curblocks = dq->dq_curblocks;
        newlim.dqb_curinodes = dq->dq_curinodes;
        if (dq->dq_id != 0) {
                newlim.dqb_btime = dq->dq_btime;
                newlim.dqb_itime = dq->dq_itime;
        }
        if (newlim.dqb_bsoftlimit &&
            dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
            (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
                newlim.dqb_btime = time.tv_sec + ump->um_btime[type];
        if (newlim.dqb_isoftlimit &&
            dq->dq_curinodes >= newlim.dqb_isoftlimit &&
            (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
                newlim.dqb_itime = time.tv_sec + ump->um_itime[type];
        dq->dq_dqb = newlim;
        if (dq->dq_curblocks < dq->dq_bsoftlimit)
                dq->dq_flags &= ~DQ_BLKS;
        if (dq->dq_curinodes < dq->dq_isoftlimit)
                dq->dq_flags &= ~DQ_INODS;
        if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
            dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
                dq->dq_flags |= DQ_FAKE;
        else
                dq->dq_flags &= ~DQ_FAKE;
        dq->dq_flags |= DQ_MOD;
        dqrele(NULLVP, dq);
        return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 */
int
setuse(mp, id, type, addr)
        struct mount *mp;
        u_long id;
        int type;
        caddr_t addr;
{
        struct dquot *dq;
        struct ufsmount *ump = VFSTOUFS(mp);
        struct dquot *ndq;
        struct dqblk usage;
        int error;

        error = copyin(addr, (caddr_t)&usage, sizeof (struct dqblk));
        if (error)
                return (error);
        if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
                return (error);
        dq = ndq;
        while (dq->dq_flags & DQ_LOCK) {
                dq->dq_flags |= DQ_WANT;
                (void) tsleep(dq, PINOD+1, "setuse", 0);
        }
        /*
         * Reset time limit if there is a soft limit and the usage was
         * previously under it, but is now over it.
         */
        if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
            usage.dqb_curblocks >= dq->dq_bsoftlimit)
                dq->dq_btime = time.tv_sec + ump->um_btime[type];
        if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
            usage.dqb_curinodes >= dq->dq_isoftlimit)
                dq->dq_itime = time.tv_sec + ump->um_itime[type];
        dq->dq_curblocks = usage.dqb_curblocks;
        dq->dq_curinodes = usage.dqb_curinodes;
        if (dq->dq_curblocks < dq->dq_bsoftlimit)
                dq->dq_flags &= ~DQ_BLKS;
        if (dq->dq_curinodes < dq->dq_isoftlimit)
                dq->dq_flags &= ~DQ_INODS;
        dq->dq_flags |= DQ_MOD;
        dqrele(NULLVP, dq);
        return (0);
}

/*
 * Q_SYNC - sync quota files to disk.
 */
int
qsync(mp)
        struct mount *mp;
{
        struct ufsmount *ump = VFSTOUFS(mp);
        struct vnode *vp, *nextvp;
        struct dquot *dq;
        int i, error;

        /*
         * Check if the mount point has any quotas.
         * If not, simply return.
         */
        for (i = 0; i < MAXQUOTAS; i++)
                if (ump->um_quotas[i] != NULLVP)
                        break;
        if (i == MAXQUOTAS)
                return (0);
        /*
         * Search vnodes associated with this mount point,
         * synchronizing any modified dquot structures.
         */
        simple_lock(&mntvnode_slock);
again:
        for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
                if (vp->v_mount != mp)
                        goto again;
                nextvp = LIST_NEXT(vp, v_mntvnodes);
                if (vp->v_type == VNON)
                        continue;
                simple_lock(&vp->v_interlock);
                simple_unlock(&mntvnode_slock);
                error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
                if (error) {
                        simple_lock(&mntvnode_slock);
                        if (error == ENOENT)
                                goto again;
                        continue;
                }
                for (i = 0; i < MAXQUOTAS; i++) {
                        dq = VTOI(vp)->i_dquot[i];
                        if (dq != NODQUOT && (dq->dq_flags & DQ_MOD))
                                dqsync(vp, dq);
                }
                vput(vp);
                simple_lock(&mntvnode_slock);
                if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
                        goto again;
        }
        simple_unlock(&mntvnode_slock);
        return (0);
}

/*
 * Code pertaining to management of the in-core dquot data structures.
 */
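/*
 * Cached dquots are hashed on the vnode of the quota file they belong to
 * and on the id within that file; dqhash is the mask for the table
 * allocated by dqinit() (and replaced by dqreinit()).
 */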
#define DQHASH(dqvp, id) \
        (((((long)(dqvp)) >> 8) + id) & dqhash)
LIST_HEAD(dqhashhead, dquot) *dqhashtbl;
u_long dqhash;

/*
 * Dquot free list.
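 *
 * Unreferenced dquots stay on their hash chain and on this list; dqrele()
 * appends them at the tail and dqget() recycles the oldest entry from the
 * head once the desired number of dquots has been allocated.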
 */
#define DQUOTINC        5       /* minimum free dquots desired */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
long numdquot, desireddquot = DQUOTINC;

/*
 * Initialize the quota system.
 */
void
dqinit()
{
        dqhashtbl =
            hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &dqhash);
        TAILQ_INIT(&dqfreelist);
}

void
dqreinit()
{
        struct dquot *dq;
        struct dqhashhead *oldhash, *hash;
        struct vnode *dqvp;
        u_long oldmask, mask, hashval;
        int i;

        hash = hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &mask);
        oldhash = dqhashtbl;
        oldmask = dqhash;
        dqhashtbl = hash;
        dqhash = mask;
        for (i = 0; i <= oldmask; i++) {
                while ((dq = LIST_FIRST(&oldhash[i])) != NULL) {
                        dqvp = dq->dq_ump->um_quotas[dq->dq_type];
                        LIST_REMOVE(dq, dq_hash);
                        hashval = DQHASH(dqvp, dq->dq_id);
                        LIST_INSERT_HEAD(&dqhashtbl[hashval], dq, dq_hash);
                }
        }
        hashdone(oldhash, M_DQUOT);
}

/*
 * Free resources held by quota system.
 */
void
dqdone()
{
        hashdone(dqhashtbl, M_DQUOT);
}

/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
int
dqget(vp, id, ump, type, dqp)
        struct vnode *vp;
        u_long id;
        struct ufsmount *ump;
        int type;
        struct dquot **dqp;
{
        struct dquot *dq;
        struct dqhashhead *dqh;
        struct vnode *dqvp;
        struct iovec aiov;
        struct uio auio;
        int error;

        dqvp = ump->um_quotas[type];
        if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
                *dqp = NODQUOT;
                return (EINVAL);
        }
        /*
         * Check the cache first.
         */
        dqh = &dqhashtbl[DQHASH(dqvp, id)];
        LIST_FOREACH(dq, dqh, dq_hash) {
                if (dq->dq_id != id ||
                    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
                        continue;
                /*
                 * Cache hit with no references.  Take
                 * the structure off the free list.
                 */
                if (dq->dq_cnt == 0)
                        TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
                DQREF(dq);
                *dqp = dq;
                return (0);
        }
        /*
         * Not in cache, allocate a new one.
         */
        if (dqfreelist.tqh_first == NODQUOT &&
            numdquot < MAXQUOTAS * desiredvnodes)
                desireddquot += DQUOTINC;
        if (numdquot < desireddquot) {
                dq = (struct dquot *)malloc(sizeof *dq, M_DQUOT, M_WAITOK);
                memset((char *)dq, 0, sizeof *dq);
                numdquot++;
        } else {
                if ((dq = dqfreelist.tqh_first) == NULL) {
                        tablefull("dquot",
                            "increase kern.maxvnodes or NVNODE");
                        *dqp = NODQUOT;
                        return (EUSERS);
                }
                if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
                        panic("free dquot isn't");
                TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
                LIST_REMOVE(dq, dq_hash);
        }
        /*
         * Initialize the contents of the dquot structure.
         */
        if (vp != dqvp)
                vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
        LIST_INSERT_HEAD(dqh, dq, dq_hash);
        DQREF(dq);
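        /*
         * Keep the dquot locked while its contents are read in from the
         * quota file; anyone finding it in the hash meanwhile sleeps on
         * DQ_LOCK until the read completes.
         */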
        dq->dq_flags = DQ_LOCK;
        dq->dq_id = id;
        dq->dq_ump = ump;
        dq->dq_type = type;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = (caddr_t)&dq->dq_dqb;
        aiov.iov_len = sizeof (struct dqblk);
        auio.uio_resid = sizeof (struct dqblk);
        auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_rw = UIO_READ;
        auio.uio_procp = (struct proc *)0;
        error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
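        /*
         * A read entirely beyond the end of the quota file returns no
         * data; treat such an entry as all zeroes (no usage, no limits).
         */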
        if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
                memset((caddr_t)&dq->dq_dqb, 0, sizeof(struct dqblk));
        if (vp != dqvp)
                VOP_UNLOCK(dqvp, 0);
        if (dq->dq_flags & DQ_WANT)
                wakeup((caddr_t)dq);
        dq->dq_flags = 0;
        /*
         * I/O error in reading quota file, release
         * quota structure and reflect problem to caller.
         */
        if (error) {
                LIST_REMOVE(dq, dq_hash);
                dqrele(vp, dq);
                *dqp = NODQUOT;
                return (error);
        }
        /*
         * Check for no limit to enforce.
         * Initialize time values if necessary.
         */
        if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
            dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
                dq->dq_flags |= DQ_FAKE;
        if (dq->dq_id != 0) {
                if (dq->dq_btime == 0)
                        dq->dq_btime = time.tv_sec + ump->um_btime[type];
                if (dq->dq_itime == 0)
                        dq->dq_itime = time.tv_sec + ump->um_itime[type];
        }
        *dqp = dq;
        return (0);
}

/*
 * Obtain a reference to a dquot.
 */
void
dqref(dq)
        struct dquot *dq;
{

        dq->dq_cnt++;
}

/*
 * Release a reference to a dquot.
 */
void
dqrele(vp, dq)
        struct vnode *vp;
        struct dquot *dq;
{

        if (dq == NODQUOT)
                return;
        if (dq->dq_cnt > 1) {
                dq->dq_cnt--;
                return;
        }
        if (dq->dq_flags & DQ_MOD)
                (void) dqsync(vp, dq);
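        /*
         * dqsync() may have slept; if another reference was gained in the
         * meantime, keep the dquot off the free list.
         */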
        if (--dq->dq_cnt > 0)
                return;
        TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
}

/*
 * Update the disk quota in the quota file.
 */
int
dqsync(vp, dq)
        struct vnode *vp;
        struct dquot *dq;
{
        struct vnode *dqvp;
        struct iovec aiov;
        struct uio auio;
        int error;

        if (dq == NODQUOT)
                panic("dqsync: dquot");
        if ((dq->dq_flags & DQ_MOD) == 0)
                return (0);
        if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
                panic("dqsync: file");
        if (vp != dqvp)
                vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
        while (dq->dq_flags & DQ_LOCK) {
                dq->dq_flags |= DQ_WANT;
                (void) tsleep(dq, PINOD+2, "dqsync", 0);
                if ((dq->dq_flags & DQ_MOD) == 0) {
                        if (vp != dqvp)
                                VOP_UNLOCK(dqvp, 0);
                        return (0);
                }
        }
        dq->dq_flags |= DQ_LOCK;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = (caddr_t)&dq->dq_dqb;
        aiov.iov_len = sizeof (struct dqblk);
        auio.uio_resid = sizeof (struct dqblk);
        auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_rw = UIO_WRITE;
        auio.uio_procp = (struct proc *)0;
        error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
        if (auio.uio_resid && error == 0)
                error = EIO;
        if (dq->dq_flags & DQ_WANT)
                wakeup((caddr_t)dq);
        dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
        if (vp != dqvp)
                VOP_UNLOCK(dqvp, 0);
        return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
void
dqflush(vp)
        struct vnode *vp;
{
        struct dquot *dq, *nextdq;
        struct dqhashhead *dqh;

        /*
         * Move all dquot's that used to refer to this quota
         * file off their hash chains (they will eventually
         * fall off the head of the free list and be re-used).
         */
        for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
                for (dq = LIST_FIRST(dqh); dq; dq = nextdq) {
                        nextdq = LIST_NEXT(dq, dq_hash);
                        if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
                                continue;
                        if (dq->dq_cnt)
                                panic("dqflush: stray dquot");
                        LIST_REMOVE(dq, dq_hash);
                        dq->dq_ump = NULL;
                }
        }
}