/*	$NetBSD: ufs_quota.c,v 1.31 2004/08/15 07:19:58 mycroft Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ufs_quota.c,v 1.31 2004/08/15 07:19:58 mycroft Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

/*
 * Quota name to error message mapping.
 */
static char *quotatypes[] = INITQFNAMES;

/*
 * Set up the quotas for an inode.
 *
 * This routine completely defines the semantics of quotas.
 * If other criteria are to be used to establish quotas, the
 * MAXQUOTAS value in quota.h should be increased, and the
 * additional dquots set up here.
 */
int
getinoquota(ip)
	struct inode *ip;
{
	struct ufsmount *ump = ip->i_ump;
	struct vnode *vp = ITOV(ip);
	int error;

	/*
	 * Set up the user quota based on file uid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[USRQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
	    error != EINVAL)
		return (error);
	/*
	 * Set up the group quota based on file gid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[GRPQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
	    error != EINVAL)
		return (error);
	return (0);
}

/*
 * Update disk usage, and take corrective action.
 */
int
chkdq(ip, change, cred, flags)
	struct inode *ip;
	int64_t change;
	struct ucred *cred;
	int flags;
{
	struct dquot *dq;
	int i;
	int ncurblocks, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkdq", 0);
			}
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_BLKS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
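	/*
	 * The change would grow usage: unless the allocation is forced or
	 * the caller is the superuser, verify that it stays within the
	 * limits for every quota type attached to the inode.
	 */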
	if ((flags & FORCE) == 0 &&
	    (cred != NOCRED && cred->cr_uid != 0)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if ((error = chkdqchg(ip, change, cred, i)) != 0)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkdq", 0);
		}
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkdqchg(ip, change, cred, type)
	struct inode *ip;
	int64_t change;
	struct ucred *cred;
	int type;
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurblocks = dq->dq_curblocks + change;

	/*
	 * If user would exceed their hard limit, disallow space allocation.
	 */
	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
		if ((dq->dq_flags & DQ_BLKS) == 0 &&
		    ip->i_uid == cred->cr_uid) {
			uprintf("\n%s: write failed, %s disk limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_BLKS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow space
	 * allocation.  Reset time limit as they cross their soft limit.
	 */
	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
			dq->dq_btime = time.tv_sec + ip->i_ump->um_btime[type];
			if (ip->i_uid == cred->cr_uid)
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "disk quota exceeded");
			return (0);
		}
		if (time.tv_sec > dq->dq_btime) {
			if ((dq->dq_flags & DQ_BLKS) == 0 &&
			    ip->i_uid == cred->cr_uid) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "disk quota exceeded for too long");
				dq->dq_flags |= DQ_BLKS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

/*
 * Check the inode limit, applying corrective action.
 */
int
chkiq(ip, change, cred, flags)
	struct inode *ip;
	int32_t change;
	struct ucred *cred;
	int flags;
{
	struct dquot *dq;
	int i;
	int ncurinodes, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkiq", 0);
			}
			ncurinodes = dq->dq_curinodes + change;
			if (ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_INODS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	if ((flags & FORCE) == 0 && cred->cr_uid != 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if ((error = chkiqchg(ip, change, cred, i)) != 0)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkiq", 0);
		}
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkiqchg(ip, change, cred, type)
	struct inode *ip;
	int32_t change;
	struct ucred *cred;
	int type;
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurinodes = dq->dq_curinodes + change;

	/*
	 * If user would exceed their hard limit, disallow inode allocation.
	 */
	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
		if ((dq->dq_flags & DQ_INODS) == 0 &&
		    ip->i_uid == cred->cr_uid) {
			uprintf("\n%s: write failed, %s inode limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_INODS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow inode
	 * allocation.  Reset time limit as they cross their soft limit.
	 */
	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
		if (dq->dq_curinodes < dq->dq_isoftlimit) {
			dq->dq_itime = time.tv_sec + ip->i_ump->um_itime[type];
			if (ip->i_uid == cred->cr_uid)
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "inode quota exceeded");
			return (0);
		}
		if (time.tv_sec > dq->dq_itime) {
			if ((dq->dq_flags & DQ_INODS) == 0 &&
			    ip->i_uid == cred->cr_uid) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "inode quota exceeded for too long");
				dq->dq_flags |= DQ_INODS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

#ifdef DIAGNOSTIC
/*
 * On filesystems with quotas enabled, it is an error for a file to change
 * size and not to have a dquot structure associated with it.
 */
void
chkdquot(ip)
	struct inode *ip;
{
	struct ufsmount *ump = ip->i_ump;
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (ump->um_quotas[i] == NULLVP ||
		    (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
			continue;
		if (ip->i_dquot[i] == NODQUOT) {
			vprint("chkdquot: missing dquot", ITOV(ip));
			panic("missing dquot");
		}
	}
}
#endif

/*
 * Code to process quotactl commands.
 */

/*
 * Q_QUOTAON - set up a quota file for a particular file system.
 */
int
quotaon(p, mp, type, fname)
	struct proc *p;
	struct mount *mp;
	int type;
	caddr_t fname;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, **vpp;
	struct vnode *nextvp;
	struct dquot *dq;
	int error;
	struct nameidata nd;

	vpp = &ump->um_quotas[type];
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, p);
	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0)
		return (error);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
		return (EACCES);
	}
	if (*vpp != vp)
		quotaoff(p, mp, type);
	ump->um_qflags[type] |= QTF_OPENING;
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_flag |= VSYSTEM;
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	crhold(p->p_ucred);
	ump->um_cred[type] = p->p_ucred;
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to the quota file being opened.
	 * NB: only need to add dquots for inodes being modified.
	 */
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON || vp->v_writecount == 0)
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto again;
		if ((error = getinoquota(VTOI(vp))) != 0) {
			vput(vp);
			break;
		}
		vput(vp);
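		/*
		 * The mount's vnode list may have changed while we slept
		 * in vget() or getinoquota(); if so, restart the scan.
		 */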
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp || vp->v_mount != mp)
			goto again;
	}
	ump->um_qflags[type] &= ~QTF_OPENING;
	if (error)
		quotaoff(p, mp, type);
	return (error);
}

/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 */
int
quotaoff(p, mp, type)
	struct proc *p;
	struct mount *mp;
	int type;
{
	struct vnode *vp;
	struct vnode *qvp, *nextvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *dq;
	struct inode *ip;
	int error;

	if ((qvp = ump->um_quotas[type]) == NULLVP)
		return (0);
	ump->um_qflags[type] |= QTF_CLOSING;
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to the quota file being closed.
	 */
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON)
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto again;
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		vput(vp);
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp || vp->v_mount != mp)
			goto again;
	}
	dqflush(qvp);
	qvp->v_flag &= ~VSYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, p->p_ucred, p);
	ump->um_quotas[type] = NULLVP;
	crfree(ump->um_cred[type]);
	ump->um_cred[type] = NOCRED;
	ump->um_qflags[type] &= ~QTF_CLOSING;
	for (type = 0; type < MAXQUOTAS; type++)
		if (ump->um_quotas[type] != NULLVP)
			break;
	if (type == MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}

/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 */
int
getquota(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	int error;

	if ((error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq)) != 0)
		return (error);
	error = copyout((caddr_t)&dq->dq_dqb, addr, sizeof (struct dqblk));
	dqrele(NULLVP, dq);
	return (error);
}

/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
int
setquota(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dqblk newlim;
	int error;

	error = copyin(addr, (caddr_t)&newlim, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setquota", 0);
	}
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	newlim.dqb_curblocks = dq->dq_curblocks;
	newlim.dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		newlim.dqb_btime = dq->dq_btime;
		newlim.dqb_itime = dq->dq_itime;
	}
	if (newlim.dqb_bsoftlimit &&
	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		newlim.dqb_btime = time.tv_sec + ump->um_btime[type];
	if (newlim.dqb_isoftlimit &&
	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		newlim.dqb_itime = time.tv_sec + ump->um_itime[type];
	dq->dq_dqb = newlim;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 */
int
setuse(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *ndq;
	struct dqblk usage;
	int error;

	error = copyin(addr, (caddr_t)&usage, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setuse", 0);
	}
	/*
	 * Reset the time limit if there is a soft limit and the user
	 * was previously under it, but is now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time.tv_sec + ump->um_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time.tv_sec + ump->um_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SYNC - sync quota files to disk.
 */
int
qsync(mp)
	struct mount *mp;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, *nextvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
	simple_lock(&mntvnode_slock);
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		if (vp->v_mount != mp)
			goto again;
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON)
			continue;
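		/*
		 * Take the vnode interlock before dropping the vnode list
		 * lock, and hand it to vget() via LK_INTERLOCK, so the
		 * vnode cannot go away in between.
		 */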
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto again;
			continue;
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq != NODQUOT && (dq->dq_flags & DQ_MOD))
				dqsync(vp, dq);
		}
		vput(vp);
		simple_lock(&mntvnode_slock);
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
			goto again;
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Code pertaining to management of the in-core dquot data structures.
 */
#define DQHASH(dqvp, id) \
	(((((long)(dqvp)) >> 8) + id) & dqhash)
LIST_HEAD(dqhashhead, dquot) *dqhashtbl;
u_long dqhash;

/*
 * Dquot free list.
 */
#define	DQUOTINC	5	/* minimum free dquots desired */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
long numdquot, desireddquot = DQUOTINC;

MALLOC_DEFINE(M_DQUOT, "UFS quota", "UFS quota entries");

/*
 * Initialize the quota system.
 */
void
dqinit()
{
	dqhashtbl =
	    hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &dqhash);
	TAILQ_INIT(&dqfreelist);
}

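/*
 * Recreate the dquot hash table (sized from desiredvnodes) and move all
 * existing dquots onto the chains of the new table.
 */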
void
dqreinit()
{
	struct dquot *dq;
	struct dqhashhead *oldhash, *hash;
	struct vnode *dqvp;
	u_long oldmask, mask, hashval;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &mask);
	oldhash = dqhashtbl;
	oldmask = dqhash;
	dqhashtbl = hash;
	dqhash = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((dq = LIST_FIRST(&oldhash[i])) != NULL) {
			dqvp = dq->dq_ump->um_quotas[dq->dq_type];
			LIST_REMOVE(dq, dq_hash);
			hashval = DQHASH(dqvp, dq->dq_id);
			LIST_INSERT_HEAD(&dqhashtbl[hashval], dq, dq_hash);
		}
	}
	hashdone(oldhash, M_DQUOT);
}

/*
 * Free resources held by quota system.
 */
void
dqdone()
{
	hashdone(dqhashtbl, M_DQUOT);
}

/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
int
dqget(vp, id, ump, type, dqp)
	struct vnode *vp;
	u_long id;
	struct ufsmount *ump;
	int type;
	struct dquot **dqp;
{
	struct dquot *dq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		return (EINVAL);
	}
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt == 0)
			TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		DQREF(dq);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
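	/*
	 * If the free list is empty but the system-wide dquot limit has not
	 * been reached, raise the allocation target so that a fresh dquot
	 * is allocated below instead of recycling one.
	 */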
	if (dqfreelist.tqh_first == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		dq = (struct dquot *)malloc(sizeof *dq, M_DQUOT, M_WAITOK);
		memset((char *)dq, 0, sizeof *dq);
		numdquot++;
	} else {
		if ((dq = dqfreelist.tqh_first) == NULL) {
			tablefull("dquot",
			    "increase kern.maxvnodes or NVNODE");
			*dqp = NODQUOT;
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("free dquot isn't");
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		LIST_REMOVE(dq, dq_hash);
	}
	/*
	 * Initialize the contents of the dquot structure.
	 */
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_ump = ump;
	dq->dq_type = type;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_procp = (struct proc *)0;
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
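	/*
	 * A read past the end of the quota file returns no data: treat the
	 * entry as all zeroes (no usage, no limits).
	 */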
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((caddr_t)&dq->dq_dqb, 0, sizeof(struct dqblk));
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0);
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags = 0;
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		LIST_REMOVE(dq, dq_hash);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time.tv_sec + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time.tv_sec + ump->um_itime[type];
	}
	*dqp = dq;
	return (0);
}

/*
 * Obtain a reference to a dquot.
 */
void
dqref(dq)
	struct dquot *dq;
{

	dq->dq_cnt++;
}

/*
 * Release a reference to a dquot.
 */
void
dqrele(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{

	if (dq == NODQUOT)
		return;
	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;
		return;
	}
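	/*
	 * Last reference: flush the dquot if it has been modified.  dqsync()
	 * may sleep, so re-check the count afterwards in case a new
	 * reference was gained while we slept.
	 */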
	if (dq->dq_flags & DQ_MOD)
		(void) dqsync(vp, dq);
	if (--dq->dq_cnt > 0)
		return;
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
}

/*
 * Update the disk quota in the quota file.
 */
int
dqsync(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{
	struct vnode *dqvp;
	struct mount *mp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dqsync: dquot");
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dqsync: file");
	vn_start_write(dqvp, &mp, V_WAIT | V_LOWER);
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
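	/*
	 * Wait for any sync already in progress.  If the dquot was synced
	 * (DQ_MOD cleared) while we slept, there is nothing left to do.
	 */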
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+2, "dqsync", 0);
		if ((dq->dq_flags & DQ_MOD) == 0) {
			if (vp != dqvp)
				VOP_UNLOCK(dqvp, 0);
			vn_finished_write(mp, V_LOWER);
			return (0);
		}
	}
	dq->dq_flags |= DQ_LOCK;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_procp = (struct proc *)0;
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0);
	vn_finished_write(mp, V_LOWER);
	return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
void
dqflush(vp)
	struct vnode *vp;
{
	struct dquot *dq, *nextdq;
	struct dqhashhead *dqh;

	/*
	 * Move all dquot's that used to refer to this quota
	 * file off their hash chains (they will eventually
	 * fall off the head of the free list and be re-used).
	 */
	for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
		for (dq = LIST_FIRST(dqh); dq; dq = nextdq) {
			nextdq = LIST_NEXT(dq, dq_hash);
			if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
				continue;
			if (dq->dq_cnt)
				panic("dqflush: stray dquot");
			LIST_REMOVE(dq, dq_hash);
			dq->dq_ump = NULL;
		}
	}
}