/*	$NetBSD: ufs_quota.c,v 1.21 2001/09/15 16:13:07 chs Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

/*
 * Quota name to error message mapping.
 */
static char *quotatypes[] = INITQFNAMES;

/*
 * Set up the quotas for an inode.
 *
 * This routine completely defines the semantics of quotas.
 * If other criteria are to be used to establish quotas, the
 * MAXQUOTAS value in quota.h should be increased, and the
 * additional dquots set up here.
 */
int
getinoquota(ip)
	struct inode *ip;
{
	struct ufsmount *ump;
	struct vnode *vp = ITOV(ip);
	int error;

	ump = VFSTOUFS(vp->v_mount);
	/*
	 * Set up the user quota based on file uid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[USRQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_ffs_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
	    error != EINVAL)
		return (error);
	/*
	 * Set up the group quota based on file gid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[GRPQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_ffs_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
	    error != EINVAL)
		return (error);
	return (0);
}

/*
 * Update disk usage, and take corrective action.
 */
int
chkdq(ip, change, cred, flags)
	struct inode *ip;
	long change;
	struct ucred *cred;
	int flags;
{
	struct dquot *dq;
	int i;
	int ncurblocks, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
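			/*
			 * The dquot may be locked by dqget() or dqsync()
			 * while its contents are read from or written to
			 * the quota file; setting DQ_WANT asks them to
			 * wake us when they are done.
			 */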
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkdq", 0);
			}
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_BLKS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
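	/*
	 * Allocating: unless the change is forced or the caller is the
	 * superuser, check every applicable quota before charging any.
	 */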
	if ((flags & FORCE) == 0 &&
	    (cred != NOCRED && cred->cr_uid != 0)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if ((error = chkdqchg(ip, change, cred, i)) != 0)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkdq", 0);
		}
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkdqchg(ip, change, cred, type)
	struct inode *ip;
	long change;
	struct ucred *cred;
	int type;
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurblocks = dq->dq_curblocks + change;

	/*
	 * If user would exceed their hard limit, disallow space allocation.
	 */
	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
		if ((dq->dq_flags & DQ_BLKS) == 0 &&
		    ip->i_ffs_uid == cred->cr_uid) {
			uprintf("\n%s: write failed, %s disk limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_BLKS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow space
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
			dq->dq_btime = time.tv_sec +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_btime[type];
			if (ip->i_ffs_uid == cred->cr_uid)
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "disk quota exceeded");
			return (0);
		}
		if (time.tv_sec > dq->dq_btime) {
			if ((dq->dq_flags & DQ_BLKS) == 0 &&
			    ip->i_ffs_uid == cred->cr_uid) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "disk quota exceeded for too long");
				dq->dq_flags |= DQ_BLKS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

/*
 * Check the inode limit, applying corrective action.
 */
int
chkiq(ip, change, cred, flags)
	struct inode *ip;
	long change;
	struct ucred *cred;
	int flags;
{
	struct dquot *dq;
	int i;
	int ncurinodes, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkiq", 0);
			}
			ncurinodes = dq->dq_curinodes + change;
			if (ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_INODS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	if ((flags & FORCE) == 0 && cred->cr_uid != 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if ((error = chkiqchg(ip, change, cred, i)) != 0)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkiq", 0);
		}
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkiqchg(ip, change, cred, type)
	struct inode *ip;
	long change;
	struct ucred *cred;
	int type;
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurinodes = dq->dq_curinodes + change;

	/*
	 * If user would exceed their hard limit, disallow inode allocation.
	 */
	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
		if ((dq->dq_flags & DQ_INODS) == 0 &&
		    ip->i_ffs_uid == cred->cr_uid) {
			uprintf("\n%s: write failed, %s inode limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_INODS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow inode
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
		if (dq->dq_curinodes < dq->dq_isoftlimit) {
			dq->dq_itime = time.tv_sec +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_itime[type];
			if (ip->i_ffs_uid == cred->cr_uid)
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "inode quota exceeded");
			return (0);
		}
		if (time.tv_sec > dq->dq_itime) {
			if ((dq->dq_flags & DQ_INODS) == 0 &&
			    ip->i_ffs_uid == cred->cr_uid) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "inode quota exceeded for too long");
				dq->dq_flags |= DQ_INODS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

#ifdef DIAGNOSTIC
/*
 * On filesystems with quotas enabled, it is an error for a file to change
 * size and not to have a dquot structure associated with it.
 */
void
chkdquot(ip)
	struct inode *ip;
{
	struct ufsmount *ump = VFSTOUFS(ITOV(ip)->v_mount);
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (ump->um_quotas[i] == NULLVP ||
		    (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
			continue;
		if (ip->i_dquot[i] == NODQUOT) {
			vprint("chkdquot: missing dquot", ITOV(ip));
			panic("missing dquot");
		}
	}
}
#endif

/*
 * Code to process quotactl commands.
 */

/*
 * Q_QUOTAON - set up a quota file for a particular file system.
 */
int
quotaon(p, mp, type, fname)
	struct proc *p;
	struct mount *mp;
	int type;
	caddr_t fname;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, **vpp;
	struct vnode *nextvp;
	struct dquot *dq;
	int error;
	struct nameidata nd;

	vpp = &ump->um_quotas[type];
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, p);
	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0)
		return (error);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
		return (EACCES);
	}
	if (*vpp != vp)
		quotaoff(p, mp, type);
	ump->um_qflags[type] |= QTF_OPENING;
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_flag |= VSYSTEM;
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	crhold(p->p_ucred);
	ump->um_cred[type] = p->p_ucred;
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
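	/*
	 * By convention the dquot for id 0 in the quota file stores the
	 * per-filesystem grace periods; use them if present, otherwise
	 * keep the defaults set above.
	 */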
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON || vp->v_writecount == 0)
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto again;
		if ((error = getinoquota(VTOI(vp))) != 0) {
			vput(vp);
			break;
		}
		vput(vp);
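		/*
		 * vget() and getinoquota() may sleep; if the vnode list
		 * changed underneath us, or the vnode moved to another
		 * mount, rescan from the start of the list.
		 */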
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp || vp->v_mount != mp)
			goto again;
	}
	ump->um_qflags[type] &= ~QTF_OPENING;
	if (error)
		quotaoff(p, mp, type);
	return (error);
}

/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 */
int
quotaoff(p, mp, type)
	struct proc *p;
	struct mount *mp;
	int type;
{
	struct vnode *vp;
	struct vnode *qvp, *nextvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *dq;
	struct inode *ip;
	int error;

	if ((qvp = ump->um_quotas[type]) == NULLVP)
		return (0);
	ump->um_qflags[type] |= QTF_CLOSING;
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON)
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto again;
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		vput(vp);
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp || vp->v_mount != mp)
			goto again;
	}
	dqflush(qvp);
	qvp->v_flag &= ~VSYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, p->p_ucred, p);
	ump->um_quotas[type] = NULLVP;
	crfree(ump->um_cred[type]);
	ump->um_cred[type] = NOCRED;
	ump->um_qflags[type] &= ~QTF_CLOSING;
	for (type = 0; type < MAXQUOTAS; type++)
		if (ump->um_quotas[type] != NULLVP)
			break;
	if (type == MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}

/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 */
int
getquota(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	int error;

	if ((error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq)) != 0)
		return (error);
	error = copyout((caddr_t)&dq->dq_dqb, addr, sizeof (struct dqblk));
	dqrele(NULLVP, dq);
	return (error);
}

/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
int
setquota(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dqblk newlim;
	int error;

	error = copyin(addr, (caddr_t)&newlim, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setquota", 0);
	}
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	newlim.dqb_curblocks = dq->dq_curblocks;
	newlim.dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		newlim.dqb_btime = dq->dq_btime;
		newlim.dqb_itime = dq->dq_itime;
	}
	if (newlim.dqb_bsoftlimit &&
	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		newlim.dqb_btime = time.tv_sec + ump->um_btime[type];
	if (newlim.dqb_isoftlimit &&
	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		newlim.dqb_itime = time.tv_sec + ump->um_itime[type];
	dq->dq_dqb = newlim;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 */
int
setuse(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *ndq;
	struct dqblk usage;
	int error;

	error = copyin(addr, (caddr_t)&usage, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setuse", 0);
	}
	/*
	 * Reset time limit if have a soft limit and were
	 * previously under it, but are now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time.tv_sec + ump->um_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time.tv_sec + ump->um_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SYNC - sync quota files to disk.
 */
int
qsync(mp)
	struct mount *mp;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, *nextvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
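	/*
	 * mntvnode_slock protects the mount's vnode list; the vnode
	 * interlock is handed to vget() (LK_INTERLOCK) so the vnode
	 * cannot be reclaimed in between.  If vget() fails with ENOENT
	 * the vnode was reclaimed anyway and the scan is restarted.
	 */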
	simple_lock(&mntvnode_slock);
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		if (vp->v_mount != mp)
			goto again;
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON)
			continue;
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto again;
			continue;
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq != NODQUOT && (dq->dq_flags & DQ_MOD))
				dqsync(vp, dq);
		}
		vput(vp);
		simple_lock(&mntvnode_slock);
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
			goto again;
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Code pertaining to management of the in-core dquot data structures.
 */
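/*
 * The hash is keyed on both the quota file vnode and the id, so the
 * same id in different quota files lands on distinct chains.
 */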
#define DQHASH(dqvp, id) \
	(((((long)(dqvp)) >> 8) + id) & dqhash)
LIST_HEAD(dqhashhead, dquot) *dqhashtbl;
u_long dqhash;

/*
 * Dquot free list.
 */
#define	DQUOTINC	5	/* minimum free dquots desired */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
long numdquot, desireddquot = DQUOTINC;

/*
 * Initialize the quota system.
 */
void
dqinit()
{
	dqhashtbl =
	    hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &dqhash);
	TAILQ_INIT(&dqfreelist);
}

void
dqreinit()
{
	struct dquot *dq;
	struct dqhashhead *oldhash, *hash;
	struct vnode *dqvp;
	u_long oldmask, mask, hashval;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &mask);
	oldhash = dqhashtbl;
	oldmask = dqhash;
	dqhashtbl = hash;
	dqhash = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((dq = LIST_FIRST(&oldhash[i])) != NULL) {
			dqvp = dq->dq_ump->um_quotas[dq->dq_type];
			LIST_REMOVE(dq, dq_hash);
			hashval = DQHASH(dqvp, dq->dq_id);
			LIST_INSERT_HEAD(&dqhashtbl[hashval], dq, dq_hash);
		}
	}
	hashdone(oldhash, M_DQUOT);
}

/*
 * Free resources held by quota system.
 */
void
dqdone()
{
	hashdone(dqhashtbl, M_DQUOT);
}

/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
int
dqget(vp, id, ump, type, dqp)
	struct vnode *vp;
	u_long id;
	struct ufsmount *ump;
	int type;
	struct dquot **dqp;
{
	struct dquot *dq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		return (EINVAL);
	}
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt == 0)
			TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		DQREF(dq);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
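	/*
	 * The pool is allowed to grow to MAXQUOTAS * desiredvnodes
	 * entries; below that target a fresh dquot is allocated,
	 * otherwise the least recently used free entry is recycled.
	 */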
	if (dqfreelist.tqh_first == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		dq = (struct dquot *)malloc(sizeof *dq, M_DQUOT, M_WAITOK);
		memset((char *)dq, 0, sizeof *dq);
		numdquot++;
	} else {
		if ((dq = dqfreelist.tqh_first) == NULL) {
			tablefull("dquot",
			    "increase kern.maxvnodes or NVNODE");
			*dqp = NODQUOT;
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("free dquot isn't");
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		LIST_REMOVE(dq, dq_hash);
	}
	/*
	 * Initialize the contents of the dquot structure.
	 */
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
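	/*
	 * DQ_LOCK is held while the dqblk is read in, so that chkdq(),
	 * chkiq() and friends sleep until the on-disk limits are valid.
	 */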
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_ump = ump;
	dq->dq_type = type;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_procp = (struct proc *)0;
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
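	/*
	 * A read past the end of the quota file returns success with
	 * nothing transferred; treat that as an all-zero (no limit) entry.
	 */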
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((caddr_t)&dq->dq_dqb, 0, sizeof(struct dqblk));
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0);
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags = 0;
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		LIST_REMOVE(dq, dq_hash);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time.tv_sec + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time.tv_sec + ump->um_itime[type];
	}
	*dqp = dq;
	return (0);
}

/*
 * Obtain a reference to a dquot.
 */
void
dqref(dq)
	struct dquot *dq;
{

	dq->dq_cnt++;
}

/*
 * Release a reference to a dquot.
 */
void
dqrele(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{

	if (dq == NODQUOT)
		return;
	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;
		return;
	}
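	/*
	 * dqsync() may sleep, during which a new reference can be
	 * gained, so check the count again before putting the dquot
	 * on the free list.
	 */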
	if (dq->dq_flags & DQ_MOD)
		(void) dqsync(vp, dq);
	if (--dq->dq_cnt > 0)
		return;
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
}

/*
 * Update the disk quota in the quota file.
 */
int
dqsync(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dqsync: dquot");
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dqsync: file");
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
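	/*
	 * Someone else may be writing this dquot out; if they clear
	 * DQ_MOD while we sleep there is nothing left to do.
	 */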
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+2, "dqsync", 0);
		if ((dq->dq_flags & DQ_MOD) == 0) {
			if (vp != dqvp)
				VOP_UNLOCK(dqvp, 0);
			return (0);
		}
	}
	dq->dq_flags |= DQ_LOCK;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_procp = (struct proc *)0;
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0);
	return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
void
dqflush(vp)
	struct vnode *vp;
{
	struct dquot *dq, *nextdq;
	struct dqhashhead *dqh;

	/*
	 * Move all dquot's that used to refer to this quota
	 * file off their hash chains (they will eventually
	 * fall off the head of the free list and be re-used).
	 */
	for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
		for (dq = LIST_FIRST(dqh); dq; dq = nextdq) {
			nextdq = LIST_NEXT(dq, dq_hash);
			if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
				continue;
			if (dq->dq_cnt)
				panic("dqflush: stray dquot");
			LIST_REMOVE(dq, dq_hash);
			dq->dq_ump = NULL;
		}
	}
}