/*	$NetBSD: ufs_quota.c,v 1.42 2006/10/20 18:58:13 reinoud Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ufs_quota.c,v 1.42 2006/10/20 18:58:13 reinoud Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kauth.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

/*
 * Quota name to error message mapping.
 */
static const char *quotatypes[] = INITQFNAMES;

/*
 * Set up the quotas for an inode.
 *
 * This routine completely defines the semantics of quotas.
 * If other criteria are to be used to establish quotas, the
 * MAXQUOTAS value in quota.h should be increased, and the
 * additional dquots set up here.
 */
int
getinoquota(struct inode *ip)
{
	struct ufsmount *ump = ip->i_ump;
	struct vnode *vp = ITOV(ip);
	int error;

	/*
	 * Set up the user quota based on file uid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[USRQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
	    error != EINVAL)
		return (error);
	/*
	 * Set up the group quota based on file gid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[GRPQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
	    error != EINVAL)
		return (error);
	return (0);
}

/*
 * Update disk usage, and take corrective action.
 */
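/*
 * The change argument is in the same units as dq_curblocks (disk blocks).
 * A negative change releases usage and never fails; the limits are only
 * enforced when usage grows (and only for non-root callers without FORCE).
 * While a dquot is being read or written (DQ_LOCK set by dqget()/dqsync()),
 * updaters set DQ_WANT and sleep until the holder wakes them.
 */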
int
chkdq(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
{
	struct dquot *dq;
	int i;
	int ncurblocks, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkdq", 0);
			}
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_BLKS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	if ((flags & FORCE) == 0 &&
	    (cred != NOCRED && kauth_cred_geteuid(cred) != 0)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if ((error = chkdqchg(ip, change, cred, i)) != 0)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkdq", 0);
		}
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkdqchg(struct inode *ip, int64_t change, kauth_cred_t cred, int type)
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurblocks = dq->dq_curblocks + change;

	/*
	 * If user would exceed their hard limit, disallow space allocation.
	 */
	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
		if ((dq->dq_flags & DQ_BLKS) == 0 &&
		    ip->i_uid == kauth_cred_geteuid(cred)) {
			uprintf("\n%s: write failed, %s disk limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_BLKS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow space
	 * allocation.  Reset time limit as they cross their soft limit.
	 */
	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
			dq->dq_btime = time_second + ip->i_ump->um_btime[type];
			if (ip->i_uid == kauth_cred_geteuid(cred))
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "disk quota exceeded");
			return (0);
		}
		if (time_second > dq->dq_btime) {
			if ((dq->dq_flags & DQ_BLKS) == 0 &&
			    ip->i_uid == kauth_cred_geteuid(cred)) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "disk quota exceeded for too long");
				dq->dq_flags |= DQ_BLKS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

/*
 * Check the inode limit, applying corrective action.
 */
int
chkiq(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
{
	struct dquot *dq;
	int i;
	int ncurinodes, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkiq", 0);
			}
			ncurinodes = dq->dq_curinodes + change;
			if (ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_INODS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	if ((flags & FORCE) == 0 && kauth_cred_geteuid(cred) != 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if ((error = chkiqchg(ip, change, cred, i)) != 0)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkiq", 0);
		}
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkiqchg(struct inode *ip, int32_t change, kauth_cred_t cred, int type)
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurinodes = dq->dq_curinodes + change;

	/*
	 * If user would exceed their hard limit, disallow inode allocation.
	 */
	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
		if ((dq->dq_flags & DQ_INODS) == 0 &&
		    ip->i_uid == kauth_cred_geteuid(cred)) {
			uprintf("\n%s: write failed, %s inode limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_INODS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow inode
	 * allocation.  Reset time limit as they cross their soft limit.
	 */
	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
		if (dq->dq_curinodes < dq->dq_isoftlimit) {
			dq->dq_itime = time_second + ip->i_ump->um_itime[type];
			if (ip->i_uid == kauth_cred_geteuid(cred))
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "inode quota exceeded");
			return (0);
		}
		if (time_second > dq->dq_itime) {
			if ((dq->dq_flags & DQ_INODS) == 0 &&
			    ip->i_uid == kauth_cred_geteuid(cred)) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "inode quota exceeded for too long");
				dq->dq_flags |= DQ_INODS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

#ifdef DIAGNOSTIC
/*
 * On filesystems with quotas enabled, it is an error for a file to change
 * size and not to have a dquot structure associated with it.
 */
void
chkdquot(struct inode *ip)
{
	struct ufsmount *ump = ip->i_ump;
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (ump->um_quotas[i] == NULLVP ||
		    (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
			continue;
		if (ip->i_dquot[i] == NODQUOT) {
			vprint("chkdquot: missing dquot", ITOV(ip));
			panic("missing dquot");
		}
	}
}
#endif

/*
 * Code to process quotactl commands.
 */
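/*
 * These routines are normally reached through the filesystem's quotactl
 * entry point (ufs_quotactl()), which decodes the Q_* command and calls
 * the matching helper below.
 */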

/*
 * Q_QUOTAON - set up a quota file for a particular file system.
 */
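/*
 * Typical path here (a sketch, not part of this file): quotaon(8) names
 * the quota file and issues something like
 *
 *	quotactl("/home", QCMD(Q_QUOTAON, USRQUOTA), 0, "/home/quota.user");
 *
 * The mount point and file name above are illustrative only.
 */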
int
quotaon(struct lwp *l, struct mount *mp, int type, caddr_t fname)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, **vpp;
	struct vnode *nextvp;
	struct dquot *dq;
	int error;
	struct nameidata nd;

	vpp = &ump->um_quotas[type];
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, l);
	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0)
		return (error);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, l->l_cred, l);
		return (EACCES);
	}
	if (*vpp != vp)
		quotaoff(l, mp, type);
	ump->um_qflags[type] |= QTF_OPENING;
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_flag |= VSYSTEM;
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	kauth_cred_hold(l->l_cred);
	ump->um_cred[type] = l->l_cred;
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
 again:
	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		nextvp = TAILQ_NEXT(vp, v_mntvnodes);
		if (vp->v_mount != mp)
			goto again;
		if (vp->v_type == VNON || vp->v_writecount == 0)
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto again;
		if ((error = getinoquota(VTOI(vp))) != 0) {
			vput(vp);
			break;
		}
		vput(vp);
		/* if the list changed, start again */
		if (TAILQ_NEXT(vp, v_mntvnodes) != nextvp)
			goto again;
	}
	ump->um_qflags[type] &= ~QTF_OPENING;
	if (error)
		quotaoff(l, mp, type);
	return (error);
}

/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 */
int
quotaoff(struct lwp *l, struct mount *mp, int type)
{
	struct vnode *vp;
	struct vnode *qvp, *nextvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *dq;
	struct inode *ip;
	int error;

	if ((qvp = ump->um_quotas[type]) == NULLVP)
		return (0);
	ump->um_qflags[type] |= QTF_CLOSING;
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
 again:
	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		nextvp = TAILQ_NEXT(vp, v_mntvnodes);
		if (vp->v_mount != mp)
			goto again;
		if (vp->v_type == VNON)
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto again;
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		vput(vp);
		/* if the list changed, start again */
		if (TAILQ_NEXT(vp, v_mntvnodes) != nextvp)
			goto again;
	}
	dqflush(qvp);
	qvp->v_flag &= ~VSYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, l->l_cred, l);
	ump->um_quotas[type] = NULLVP;
	kauth_cred_free(ump->um_cred[type]);
	ump->um_cred[type] = NOCRED;
	ump->um_qflags[type] &= ~QTF_CLOSING;
	for (type = 0; type < MAXQUOTAS; type++)
		if (ump->um_quotas[type] != NULLVP)
			break;
	if (type == MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}

/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 */
int
getquota(struct mount *mp, u_long id, int type, caddr_t addr)
{
	struct dquot *dq;
	int error;

	if ((error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq)) != 0)
		return (error);
	error = copyout((caddr_t)&dq->dq_dqb, addr, sizeof (struct dqblk));
	dqrele(NULLVP, dq);
	return (error);
}

/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
int
setquota(struct mount *mp, u_long id, int type, caddr_t addr)
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dqblk newlim;
	int error;

	error = copyin(addr, (caddr_t)&newlim, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setquota", 0);
	}
	/*
	 * Copy all but the current values.
	 * Reset the time limit if the user previously had no soft limit
	 * or was under it, but now has a soft limit and is over it.
	 */
	newlim.dqb_curblocks = dq->dq_curblocks;
	newlim.dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		newlim.dqb_btime = dq->dq_btime;
		newlim.dqb_itime = dq->dq_itime;
	}
	if (newlim.dqb_bsoftlimit &&
	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		newlim.dqb_btime = time_second + ump->um_btime[type];
	if (newlim.dqb_isoftlimit &&
	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		newlim.dqb_itime = time_second + ump->um_itime[type];
	dq->dq_dqb = newlim;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 */
int
setuse(struct mount *mp, u_long id, int type, caddr_t addr)
{
	struct dquot *dq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *ndq;
	struct dqblk usage;
	int error;

	error = copyin(addr, (caddr_t)&usage, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setuse", 0);
	}
	/*
	 * Reset the time limit if the user has a soft limit and was
	 * previously under it, but is now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time_second + ump->um_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time_second + ump->um_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SYNC - sync quota files to disk.
 */
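/*
 * This is driven by the per-filesystem sync path (e.g. ffs_sync() is
 * expected to call it for mounts with MNT_QUOTA set), so modified dquots
 * get written out along with the rest of the filesystem.
 */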
int
qsync(struct mount *mp)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, *nextvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
	simple_lock(&mntvnode_slock);
 again:
	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		nextvp = TAILQ_NEXT(vp, v_mntvnodes);
		if (vp->v_mount != mp)
			goto again;
		if (vp->v_type == VNON)
			continue;
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto again;
			continue;
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq != NODQUOT && (dq->dq_flags & DQ_MOD))
				dqsync(vp, dq);
		}
		vput(vp);
		simple_lock(&mntvnode_slock);
		/* if the list changed, start again */
		if (TAILQ_NEXT(vp, v_mntvnodes) != nextvp)
			goto again;
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Code pertaining to management of the in-core dquot data structures.
 */
#define	DQHASH(dqvp, id) \
	(((((long)(dqvp)) >> 8) + id) & dqhash)
static LIST_HEAD(dqhashhead, dquot) *dqhashtbl;
static u_long dqhash;

/*
 * Dquot free list.
 */
#define	DQUOTINC	5	/* minimum free dquots desired */
static TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
static long numdquot, desireddquot = DQUOTINC;

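/*
 * In-core dquots are cached in a hash table keyed on the quota file's
 * vnode and the uid/gid.  Entries with a zero reference count stay on
 * their hash chain but are also linked on dqfreelist, from which dqget()
 * reclaims the least recently released entry when a new one is needed.
 */
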
MALLOC_DEFINE(M_DQUOT, "UFS quota", "UFS quota entries");

/*
 * Initialize the quota system.
 */
void
dqinit(void)
{
	dqhashtbl =
	    hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &dqhash);
	TAILQ_INIT(&dqfreelist);
}

void
dqreinit(void)
{
	struct dquot *dq;
	struct dqhashhead *oldhash, *hash;
	struct vnode *dqvp;
	u_long oldmask, mask, hashval;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &mask);
	oldhash = dqhashtbl;
	oldmask = dqhash;
	dqhashtbl = hash;
	dqhash = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((dq = LIST_FIRST(&oldhash[i])) != NULL) {
			dqvp = dq->dq_ump->um_quotas[dq->dq_type];
			LIST_REMOVE(dq, dq_hash);
			hashval = DQHASH(dqvp, dq->dq_id);
			LIST_INSERT_HEAD(&dqhashtbl[hashval], dq, dq_hash);
		}
	}
	hashdone(oldhash, M_DQUOT);
}

/*
 * Free resources held by quota system.
 */
void
dqdone(void)
{
	hashdone(dqhashtbl, M_DQUOT);
}

/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	struct dquot *dq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		return (EINVAL);
	}
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt == 0)
			TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		dqref(dq);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	if (dqfreelist.tqh_first == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		dq = (struct dquot *)malloc(sizeof *dq, M_DQUOT, M_WAITOK);
		memset((char *)dq, 0, sizeof *dq);
		numdquot++;
	} else {
		if ((dq = dqfreelist.tqh_first) == NULL) {
			tablefull("dquot",
			    "increase kern.maxvnodes or NVNODE");
			*dqp = NODQUOT;
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("free dquot isn't");
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		LIST_REMOVE(dq, dq_hash);
	}
	/*
	 * Initialize the contents of the dquot structure.
	 */
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dqref(dq);
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_ump = ump;
	dq->dq_type = type;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((caddr_t)&dq->dq_dqb, 0, sizeof(struct dqblk));
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0);
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags = 0;
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		LIST_REMOVE(dq, dq_hash);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time_second + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time_second + ump->um_itime[type];
	}
	*dqp = dq;
	return (0);
}

/*
 * Obtain a reference to a dquot.
 */
void
dqref(struct dquot *dq)
{

	dq->dq_cnt++;
}

/*
 * Release a reference to a dquot.
 */
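/*
 * If this drops the last reference, a modified dquot is written back
 * first.  dqsync() can sleep, so the count is re-checked afterwards; the
 * entry goes on the free list only once it is truly unreferenced.
 */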
void
dqrele(struct vnode *vp, struct dquot *dq)
{

	if (dq == NODQUOT)
		return;
	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;
		return;
	}
	if (dq->dq_flags & DQ_MOD)
		(void) dqsync(vp, dq);
	if (--dq->dq_cnt > 0)
		return;
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
}

/*
 * Update the disk quota in the quota file.
 */
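/*
 * The dqblk is written back at offset dq_id * sizeof(struct dqblk) with
 * the credentials saved at quotaon time, and DQ_MOD is cleared on the
 * way out.  The caller must hold a reference to the dquot.
 */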
int
dqsync(struct vnode *vp, struct dquot *dq)
{
	struct vnode *dqvp;
	struct mount *mp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dqsync: dquot");
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dqsync: file");
	vn_start_write(dqvp, &mp, V_WAIT | V_LOWER);
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+2, "dqsync", 0);
		if ((dq->dq_flags & DQ_MOD) == 0) {
			if (vp != dqvp)
				VOP_UNLOCK(dqvp, 0);
			vn_finished_write(mp, V_LOWER);
			return (0);
		}
	}
	dq->dq_flags |= DQ_LOCK;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
	auio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0);
	vn_finished_write(mp, V_LOWER);
	return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
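/*
 * Called from quotaoff() after every inode's reference to the quota file
 * has been released, so a dquot that is still referenced at this point
 * indicates a bug and we panic.
 */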
void
dqflush(struct vnode *vp)
{
	struct dquot *dq, *nextdq;
	struct dqhashhead *dqh;

	/*
	 * Move all dquot's that used to refer to this quota
	 * file off their hash chains (they will eventually
	 * fall off the head of the free list and be re-used).
	 */
	for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
		for (dq = LIST_FIRST(dqh); dq; dq = nextdq) {
			nextdq = LIST_NEXT(dq, dq_hash);
			if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
				continue;
			if (dq->dq_cnt)
				panic("dqflush: stray dquot");
			LIST_REMOVE(dq, dq_hash);
			dq->dq_ump = NULL;
		}
	}
}