/*	$NetBSD: ufs_quota.c,v 1.41 2006/07/23 22:06:15 ad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ufs_quota.c,v 1.41 2006/07/23 22:06:15 ad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kauth.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

/*
 * Quota name to error message mapping.
 */
static const char *quotatypes[] = INITQFNAMES;
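/*
 * NB: INITQFNAMES (from quota.h) is expected to supply the usual
 * "user"/"group" strings; they are only used in the uprintf() messages
 * below.
 */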

/*
 * Set up the quotas for an inode.
 *
 * This routine completely defines the semantics of quotas.
 * If other criteria are to be used to establish quotas, the
 * MAXQUOTAS value in quota.h should be increased, and the
 * additional dquots set up here.
 */
int
getinoquota(struct inode *ip)
{
	struct ufsmount *ump = ip->i_ump;
	struct vnode *vp = ITOV(ip);
	int error;

	/*
	 * Set up the user quota based on file uid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[USRQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
	    error != EINVAL)
		return (error);
	/*
	 * Set up the group quota based on file gid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[GRPQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
	    error != EINVAL)
		return (error);
	return (0);
}

/*
 * Update disk usage, and take corrective action.
 */
int
chkdq(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
{
	struct dquot *dq;
	int i;
	int ncurblocks, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
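			/*
			 * DQ_LOCK is held by dqget()/dqsync() while the
			 * dquot is being read from or written to the quota
			 * file; set DQ_WANT and sleep so their wakeup()
			 * wakes us once the I/O completes.
			 */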
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkdq", 0);
			}
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_BLKS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	if ((flags & FORCE) == 0 &&
	    (cred != NOCRED && kauth_cred_geteuid(cred) != 0)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if ((error = chkdqchg(ip, change, cred, i)) != 0)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkdq", 0);
		}
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkdqchg(struct inode *ip, int64_t change, kauth_cred_t cred, int type)
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurblocks = dq->dq_curblocks + change;

	/*
	 * If user would exceed their hard limit, disallow space allocation.
	 */
	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
		if ((dq->dq_flags & DQ_BLKS) == 0 &&
		    ip->i_uid == kauth_cred_geteuid(cred)) {
			uprintf("\n%s: write failed, %s disk limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_BLKS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow space
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
			dq->dq_btime = time_second + ip->i_ump->um_btime[type];
			if (ip->i_uid == kauth_cred_geteuid(cred))
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "disk quota exceeded");
			return (0);
		}
		if (time_second > dq->dq_btime) {
			if ((dq->dq_flags & DQ_BLKS) == 0 &&
			    ip->i_uid == kauth_cred_geteuid(cred)) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "disk quota exceeded for too long");
				dq->dq_flags |= DQ_BLKS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

/*
 * Check the inode limit, applying corrective action.
 */
int
chkiq(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
{
	struct dquot *dq;
	int i;
	int ncurinodes, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkiq", 0);
			}
			ncurinodes = dq->dq_curinodes + change;
			if (ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_INODS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	if ((flags & FORCE) == 0 && kauth_cred_geteuid(cred) != 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if ((error = chkiqchg(ip, change, cred, i)) != 0)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkiq", 0);
		}
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkiqchg(struct inode *ip, int32_t change, kauth_cred_t cred, int type)
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurinodes = dq->dq_curinodes + change;

	/*
	 * If user would exceed their hard limit, disallow inode allocation.
	 */
	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
		if ((dq->dq_flags & DQ_INODS) == 0 &&
		    ip->i_uid == kauth_cred_geteuid(cred)) {
			uprintf("\n%s: write failed, %s inode limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_INODS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow inode
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
		if (dq->dq_curinodes < dq->dq_isoftlimit) {
			dq->dq_itime = time_second + ip->i_ump->um_itime[type];
			if (ip->i_uid == kauth_cred_geteuid(cred))
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "inode quota exceeded");
			return (0);
		}
		if (time_second > dq->dq_itime) {
			if ((dq->dq_flags & DQ_INODS) == 0 &&
			    ip->i_uid == kauth_cred_geteuid(cred)) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "inode quota exceeded for too long");
				dq->dq_flags |= DQ_INODS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

#ifdef DIAGNOSTIC
/*
 * On filesystems with quotas enabled, it is an error for a file to change
 * size and not to have a dquot structure associated with it.
 */
void
chkdquot(struct inode *ip)
{
	struct ufsmount *ump = ip->i_ump;
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (ump->um_quotas[i] == NULLVP ||
		    (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
			continue;
		if (ip->i_dquot[i] == NODQUOT) {
			vprint("chkdquot: missing dquot", ITOV(ip));
			panic("missing dquot");
		}
	}
}
#endif

/*
 * Code to process quotactl commands.
 */
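
/*
 * A sketch of how these routines are normally reached (illustrative
 * only; it assumes the classic 4.4BSD quotactl(2) interface and the
 * QCMD()/Q_* definitions from <ufs/ufs/quota.h>, with ufs_quotactl()
 * doing the command decoding and dispatch):
 *
 *	struct dqblk dqb;
 *
 *	if (quotactl("/home", QCMD(Q_GETQUOTA, USRQUOTA), uid, &dqb) != 0)
 *		err(1, "quotactl");
 *
 * Q_QUOTAON/Q_QUOTAOFF map to quotaon()/quotaoff() below; Q_GETQUOTA,
 * Q_SETQUOTA and Q_SETUSE to getquota(), setquota() and setuse(); and
 * Q_SYNC to qsync().
 */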

/*
 * Q_QUOTAON - set up a quota file for a particular file system.
 */
int
quotaon(struct lwp *l, struct mount *mp, int type, caddr_t fname)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, **vpp;
	struct vnode *nextvp;
	struct dquot *dq;
	int error;
	struct nameidata nd;

	vpp = &ump->um_quotas[type];
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, l);
	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0)
		return (error);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, l->l_cred, l);
		return (EACCES);
	}
	if (*vpp != vp)
		quotaoff(l, mp, type);
	ump->um_qflags[type] |= QTF_OPENING;
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_flag |= VSYSTEM;
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	kauth_cred_hold(l->l_cred);
	ump->um_cred[type] = l->l_cred;
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON || vp->v_writecount == 0)
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto again;
		if ((error = getinoquota(VTOI(vp))) != 0) {
			vput(vp);
			break;
		}
		vput(vp);
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp || vp->v_mount != mp)
			goto again;
	}
	ump->um_qflags[type] &= ~QTF_OPENING;
	if (error)
		quotaoff(l, mp, type);
	return (error);
}

/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 */
int
quotaoff(struct lwp *l, struct mount *mp, int type)
{
	struct vnode *vp;
	struct vnode *qvp, *nextvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *dq;
	struct inode *ip;
	int error;

	if ((qvp = ump->um_quotas[type]) == NULLVP)
		return (0);
	ump->um_qflags[type] |= QTF_CLOSING;
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON)
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto again;
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		vput(vp);
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp || vp->v_mount != mp)
			goto again;
	}
	dqflush(qvp);
	qvp->v_flag &= ~VSYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, l->l_cred, l);
	ump->um_quotas[type] = NULLVP;
	kauth_cred_free(ump->um_cred[type]);
	ump->um_cred[type] = NOCRED;
	ump->um_qflags[type] &= ~QTF_CLOSING;
	for (type = 0; type < MAXQUOTAS; type++)
		if (ump->um_quotas[type] != NULLVP)
			break;
	if (type == MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}

/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 */
int
getquota(struct mount *mp, u_long id, int type, caddr_t addr)
{
	struct dquot *dq;
	int error;

	if ((error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq)) != 0)
		return (error);
	error = copyout((caddr_t)&dq->dq_dqb, addr, sizeof (struct dqblk));
	dqrele(NULLVP, dq);
	return (error);
}

/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
int
setquota(struct mount *mp, u_long id, int type, caddr_t addr)
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dqblk newlim;
	int error;

	error = copyin(addr, (caddr_t)&newlim, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setquota", 0);
	}
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	newlim.dqb_curblocks = dq->dq_curblocks;
	newlim.dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		newlim.dqb_btime = dq->dq_btime;
		newlim.dqb_itime = dq->dq_itime;
	}
	if (newlim.dqb_bsoftlimit &&
	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		newlim.dqb_btime = time_second + ump->um_btime[type];
	if (newlim.dqb_isoftlimit &&
	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		newlim.dqb_itime = time_second + ump->um_itime[type];
	dq->dq_dqb = newlim;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 */
int
setuse(struct mount *mp, u_long id, int type, caddr_t addr)
{
	struct dquot *dq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *ndq;
	struct dqblk usage;
	int error;

	error = copyin(addr, (caddr_t)&usage, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setuse", 0);
	}
	/*
	 * Reset time limit if have a soft limit and were
	 * previously under it, but are now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time_second + ump->um_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time_second + ump->um_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SYNC - sync quota files to disk.
 */
int
qsync(struct mount *mp)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, *nextvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
	simple_lock(&mntvnode_slock);
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		if (vp->v_mount != mp)
			goto again;
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON)
			continue;
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto again;
			continue;
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq != NODQUOT && (dq->dq_flags & DQ_MOD))
				dqsync(vp, dq);
		}
		vput(vp);
		simple_lock(&mntvnode_slock);
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
			goto again;
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Code pertaining to management of the in-core dquot data structures.
 */
#define DQHASH(dqvp, id) \
	(((((long)(dqvp)) >> 8) + id) & dqhash)
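/*
 * dqhashtbl hashes in-core dquots by quota-file vnode and id: DQHASH()
 * shifts the vnode pointer right to drop its low, mostly constant bits,
 * adds the id and masks with dqhash (the hashinit() mask).
 */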
static LIST_HEAD(dqhashhead, dquot) *dqhashtbl;
static u_long dqhash;

/*
 * Dquot free list.
 */
#define	DQUOTINC	5	/* minimum free dquots desired */
static TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
static long numdquot, desireddquot = DQUOTINC;
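/*
 * A dquot with a zero reference count stays in the hash table (so dqget()
 * can still find it) and is also linked on dqfreelist in LRU order:
 * dqrele() appends to the tail, dqget() reclaims from the head once
 * numdquot reaches desireddquot.
 */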

MALLOC_DEFINE(M_DQUOT, "UFS quota", "UFS quota entries");

/*
 * Initialize the quota system.
 */
void
dqinit(void)
{
	dqhashtbl =
	    hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &dqhash);
	TAILQ_INIT(&dqfreelist);
}

void
dqreinit(void)
{
	struct dquot *dq;
	struct dqhashhead *oldhash, *hash;
	struct vnode *dqvp;
	u_long oldmask, mask, hashval;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &mask);
	oldhash = dqhashtbl;
	oldmask = dqhash;
	dqhashtbl = hash;
	dqhash = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((dq = LIST_FIRST(&oldhash[i])) != NULL) {
			dqvp = dq->dq_ump->um_quotas[dq->dq_type];
			LIST_REMOVE(dq, dq_hash);
			hashval = DQHASH(dqvp, dq->dq_id);
			LIST_INSERT_HEAD(&dqhashtbl[hashval], dq, dq_hash);
		}
	}
	hashdone(oldhash, M_DQUOT);
}

/*
 * Free resources held by quota system.
 */
void
dqdone(void)
{
	hashdone(dqhashtbl, M_DQUOT);
}

/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	struct dquot *dq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		return (EINVAL);
	}
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt == 0)
			TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		dqref(dq);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	if (dqfreelist.tqh_first == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		dq = (struct dquot *)malloc(sizeof *dq, M_DQUOT, M_WAITOK);
		memset((char *)dq, 0, sizeof *dq);
		numdquot++;
	} else {
		if ((dq = dqfreelist.tqh_first) == NULL) {
			tablefull("dquot",
			    "increase kern.maxvnodes or NVNODE");
			*dqp = NODQUOT;
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("free dquot isn't");
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		LIST_REMOVE(dq, dq_hash);
	}
	/*
	 * Initialize the contents of the dquot structure.
	 */
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dqref(dq);
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_ump = ump;
	dq->dq_type = type;
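	/*
	 * The quota file is a flat array of struct dqblk records indexed
	 * by id, so the entry for this id lives at byte offset
	 * id * sizeof(struct dqblk); read it into dq_dqb.
	 */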
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((caddr_t)&dq->dq_dqb, 0, sizeof(struct dqblk));
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0);
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags = 0;
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		LIST_REMOVE(dq, dq_hash);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time_second + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time_second + ump->um_itime[type];
	}
	*dqp = dq;
	return (0);
}

/*
 * Obtain a reference to a dquot.
 */
void
dqref(struct dquot *dq)
{

	dq->dq_cnt++;
}

/*
 * Release a reference to a dquot.
 */
void
dqrele(struct vnode *vp, struct dquot *dq)
{

	if (dq == NODQUOT)
		return;
	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;
		return;
	}
	if (dq->dq_flags & DQ_MOD)
		(void) dqsync(vp, dq);
	if (--dq->dq_cnt > 0)
		return;
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
}

/*
 * Update the disk quota in the quota file.
 */
int
dqsync(struct vnode *vp, struct dquot *dq)
{
	struct vnode *dqvp;
	struct mount *mp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dqsync: dquot");
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dqsync: file");
	vn_start_write(dqvp, &mp, V_WAIT | V_LOWER);
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+2, "dqsync", 0);
		if ((dq->dq_flags & DQ_MOD) == 0) {
			if (vp != dqvp)
				VOP_UNLOCK(dqvp, 0);
			vn_finished_write(mp, V_LOWER);
			return (0);
		}
	}
	dq->dq_flags |= DQ_LOCK;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
	auio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0);
	vn_finished_write(mp, V_LOWER);
	return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
void
dqflush(struct vnode *vp)
{
	struct dquot *dq, *nextdq;
	struct dqhashhead *dqh;

	/*
	 * Move all dquot's that used to refer to this quota
	 * file off their hash chains (they will eventually
	 * fall off the head of the free list and be re-used).
	 */
	for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
		for (dq = LIST_FIRST(dqh); dq; dq = nextdq) {
			nextdq = LIST_NEXT(dq, dq_hash);
			if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
				continue;
			if (dq->dq_cnt)
				panic("dqflush: stray dquot");
			LIST_REMOVE(dq, dq_hash);
			dq->dq_ump = NULL;
		}
	}
}