ulfs_quota2.c revision 1.18 1 /* $NetBSD: ulfs_quota2.c,v 1.18 2015/03/28 19:24:05 maxv Exp $ */
2 /* from NetBSD: ufs_quota2.c,v 1.35 2012/09/27 07:47:56 bouyer Exp */
3 /* from NetBSD: ffs_quota2.c,v 1.4 2011/06/12 03:36:00 rmind Exp */
4
5 /*-
6 * Copyright (c) 2010 Manuel Bouyer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.18 2015/03/28 19:24:05 maxv Exp $");
33
34 #include <sys/buf.h>
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/namei.h>
39 #include <sys/file.h>
40 #include <sys/proc.h>
41 #include <sys/vnode.h>
42 #include <sys/mount.h>
43 #include <sys/fstrans.h>
44 #include <sys/kauth.h>
45 #include <sys/wapbl.h>
46 #include <sys/quota.h>
47 #include <sys/quotactl.h>
48
49 #include <ufs/lfs/lfs_extern.h>
50
51 #include <ufs/lfs/ulfs_quota2.h>
52 #include <ufs/lfs/ulfs_inode.h>
53 #include <ufs/lfs/ulfsmount.h>
54 #include <ufs/lfs/ulfs_bswap.h>
55 #include <ufs/lfs/ulfs_extern.h>
56 #include <ufs/lfs/ulfs_quota.h>
57
/*
 * LOCKING:
 * Data in the entries are protected by the associated struct dquot's
 * dq_interlock (this means we can't read or change a quota entry without
 * grabbing a dquot for it).
 * The header and lists (including pointers in the data entries, and q2e_uid)
 * are protected by the global dqlock.
 * The locking order is dq_interlock -> dqlock.
 */
67
/* Forward declarations for the local helpers defined below. */
static int quota2_bwrite(struct mount *, struct buf *);
static int getinoquota2(struct inode *, bool, bool, struct buf **,
    struct quota2_entry **);
static int getq2h(struct ulfsmount *, int, struct buf **,
    struct quota2_header **, int);
static int getq2e(struct ulfsmount *, int, daddr_t, int, struct buf **,
    struct quota2_entry **, int);
static int quota2_walk_list(struct ulfsmount *, struct buf *, int,
    uint64_t *, int, void *,
    int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *,
      uint64_t, void *));

/* Printable names for the limit types (QL_BLOCK/QL_FILE), for messages. */
static const char *limnames[] = INITQLNAMES;
81
82 static void
83 quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
84 struct quota2_entry *q2e)
85 {
86 /* make sure we can index q2e_val[] by the fs-independent objtype */
87 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
88 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
89
90 q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
91 q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
92 q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
93 }
94
95 /*
96 * Convert internal representation to FS-independent representation.
97 * (Note that while the two types are currently identical, the
98 * internal representation is an on-disk struct and the FS-independent
99 * representation is not, and they might diverge in the future.)
100 */
101 static void
102 q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
103 {
104 qv->qv_softlimit = q2v->q2v_softlimit;
105 qv->qv_hardlimit = q2v->q2v_hardlimit;
106 qv->qv_usage = q2v->q2v_cur;
107 qv->qv_expiretime = q2v->q2v_time;
108 qv->qv_grace = q2v->q2v_grace;
109 }
110
111 /*
112 * Convert a quota2entry and default-flag to the FS-independent
113 * representation.
114 */
115 static void
116 q2e_to_quotaval(struct quota2_entry *q2e, int def,
117 id_t *id, int objtype, struct quotaval *ret)
118 {
119 if (def) {
120 *id = QUOTA_DEFAULTID;
121 } else {
122 *id = q2e->q2e_uid;
123 }
124
125 KASSERT(objtype >= 0 && objtype < N_QL);
126 q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
127 }
128
129
130 static int
131 quota2_bwrite(struct mount *mp, struct buf *bp)
132 {
133 if (mp->mnt_flag & MNT_SYNCHRONOUS)
134 return bwrite(bp);
135 else {
136 bdwrite(bp);
137 return 0;
138 }
139 }
140
/*
 * Read the quota file header block for the given quota type.
 *
 * ump   - mount owning the quota files
 * type  - quota type index (user/group)
 * bpp   - out: buffer holding the header block; caller must release it
 * q2hp  - out: pointer to the header inside *bpp
 * flags - extra flags handed to bread() (e.g. B_MODIFY)
 *
 * Must be called with lfs_dqlock held.  Panics if the quota file is
 * truncated or the header magic/type does not match on disk.
 */
static int
getq2h(struct ulfsmount *ump, int type,
    struct buf **bpp, struct quota2_header **q2hp, int flags)
{
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	int error;
	struct buf *bp;
	struct quota2_header *q2h;

	KASSERT(mutex_owned(&lfs_dqlock));
	/* the header lives in logical block 0 of the quota file */
	error = bread(ump->um_quotas[type], 0, ump->umq2_bsize, flags, &bp);
	if (error)
		return error;
	if (bp->b_resid != 0)
		panic("dq2get: %s quota file truncated", lfs_quotatypes[type]);

	q2h = (void *)bp->b_data;
	/* validate the on-disk magic (byte-swapped as needed) and type */
	if (ulfs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
	    q2h->q2h_type != type)
		panic("dq2get: corrupted %s quota header", lfs_quotatypes[type]);
	*bpp = bp;
	*q2hp = q2h;
	return 0;
}
166
/*
 * Read the quota file block holding one quota entry and return the
 * buffer plus a pointer to the entry at the given offset inside it.
 *
 * lblkno    - logical block of the quota file containing the entry
 * blkoffset - byte offset of the entry within that block; must be
 *             64-bit aligned or the file is considered corrupted
 * flags     - extra flags handed to bread() (e.g. B_MODIFY)
 *
 * The caller is responsible for releasing *bpp.
 */
static int
getq2e(struct ulfsmount *ump, int type, daddr_t lblkno, int blkoffset,
    struct buf **bpp, struct quota2_entry **q2ep, int flags)
{
	int error;
	struct buf *bp;

	if (blkoffset & (sizeof(uint64_t) - 1)) {
		panic("dq2get: %s quota file corrupted",
		    lfs_quotatypes[type]);
	}
	error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize, flags, &bp);
	if (error)
		return error;
	if (bp->b_resid != 0) {
		panic("dq2get: %s quota file corrupted",
		    lfs_quotatypes[type]);
	}
	*q2ep = (void *)((char *)bp->b_data + blkoffset);
	*bpp = bp;
	return 0;
}
189
190 /* walk a quota entry list, calling the callback for each entry */
191 #define Q2WL_ABORT 0x10000000
192
193 static int
194 quota2_walk_list(struct ulfsmount *ump, struct buf *hbp, int type,
195 uint64_t *offp, int flags, void *a,
196 int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
197 {
198 struct lfs *fs = ump->um_lfs;
199 const int needswap = ULFS_MPNEEDSWAP(fs);
200 daddr_t off = ulfs_rw64(*offp, needswap);
201 struct buf *bp, *obp = hbp;
202 int ret = 0, ret2 = 0;
203 struct quota2_entry *q2e;
204 daddr_t lblkno, blkoff, olblkno = 0;
205
206 KASSERT(mutex_owner(&lfs_dqlock));
207
208 while (off != 0) {
209 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
210 blkoff = (off & ump->umq2_bmask);
211 if (lblkno == 0) {
212 /* in the header block */
213 bp = hbp;
214 } else if (lblkno == olblkno) {
215 /* still in the same buf */
216 bp = obp;
217 } else {
218 ret = bread(ump->um_quotas[type], lblkno,
219 ump->umq2_bsize, flags, &bp);
220 if (ret)
221 return ret;
222 if (bp->b_resid != 0) {
223 panic("quota2_walk_list: %s quota file corrupted",
224 lfs_quotatypes[type]);
225 }
226 }
227 q2e = (void *)((char *)(bp->b_data) + blkoff);
228 ret = (*func)(ump, offp, q2e, off, a);
229 if (off != ulfs_rw64(*offp, needswap)) {
230 /* callback changed parent's pointer, redo */
231 off = ulfs_rw64(*offp, needswap);
232 if (bp != hbp && bp != obp)
233 ret2 = bwrite(bp);
234 } else {
235 /* parent if now current */
236 if (obp != bp && obp != hbp) {
237 if (flags & B_MODIFY)
238 ret2 = bwrite(obp);
239 else
240 brelse(obp, 0);
241 }
242 obp = bp;
243 olblkno = lblkno;
244 offp = &(q2e->q2e_next);
245 off = ulfs_rw64(*offp, needswap);
246 }
247 if (ret)
248 break;
249 if (ret2) {
250 ret = ret2;
251 break;
252 }
253 }
254 if (obp != hbp) {
255 if (flags & B_MODIFY)
256 ret2 = bwrite(obp);
257 else
258 brelse(obp, 0);
259 }
260 if (ret & Q2WL_ABORT)
261 return 0;
262 if (ret == 0)
263 return ret2;
264 return ret;
265 }
266
267 int
268 lfsquota2_umount(struct mount *mp, int flags)
269 {
270 int i, error;
271 struct ulfsmount *ump = VFSTOULFS(mp);
272 struct lfs *fs = ump->um_lfs;
273
274 if ((fs->um_flags & ULFS_QUOTA2) == 0)
275 return 0;
276
277 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
278 if (ump->um_quotas[i] != NULLVP) {
279 error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
280 ump->um_cred[i]);
281 if (error) {
282 printf("quota2_umount failed: close(%p) %d\n",
283 ump->um_quotas[i], error);
284 return error;
285 }
286 }
287 ump->um_quotas[i] = NULLVP;
288 }
289 return 0;
290 }
291
/*
 * Allocate an on-disk quota entry for (type, uid) and attach it to the
 * in-core dquot: take an entry from the header's free list (growing the
 * quota file by one block if the free list is empty), initialize it
 * from the default entry, and link it onto the uid's hash chain.
 *
 * Called with both dq->dq_interlock and lfs_dqlock held.
 */
static int
quota2_q2ealloc(struct ulfsmount *ump, int type, uid_t uid, struct dquot *dq)
{
	int error, error2;
	struct buf *hbp, *bp;
	struct quota2_header *q2h;
	struct quota2_entry *q2e;
	daddr_t offset;
	u_long hash_mask;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	KASSERT(mutex_owned(&dq->dq_interlock));
	KASSERT(mutex_owned(&lfs_dqlock));
	error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
	if (error)
		return error;
	offset = ulfs_rw64(q2h->q2h_free, needswap);
	if (offset == 0) {
		struct vnode *vp = ump->um_quotas[type];
		struct inode *ip = VTOI(vp);
		uint64_t size = ip->i_size;
		/* free list empty: need to allocate a new disk block */
		error = lfs_balloc(vp, size, ump->umq2_bsize,
		    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
		if (error) {
			brelse(hbp, 0);
			return error;
		}
		KASSERT((ip->i_size % ump->umq2_bsize) == 0);
		ip->i_size += ump->umq2_bsize;
		DIP_ASSIGN(ip, size, ip->i_size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		uvm_vnp_setsize(vp, ip->i_size);
		/* carve the new block into entries and put them on the free list */
		lfsquota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
		    needswap);
		error = bwrite(bp);
		error2 = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
		if (error || error2) {
			brelse(hbp, 0);
			if (error)
				return error;
			return error2;
		}
		/* the free list is now guaranteed non-empty */
		offset = ulfs_rw64(q2h->q2h_free, needswap);
		KASSERT(offset != 0);
	}
	/* record the entry's location in the dquot for later lookups */
	dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
	dq->dq2_blkoff = (offset & ump->umq2_bmask);
	if (dq->dq2_lblkno == 0) {
		/* entry lives in the header block we already hold */
		bp = hbp;
		q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
	} else {
		error = getq2e(ump, type, dq->dq2_lblkno,
		    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
		if (error) {
			brelse(hbp, 0);
			return error;
		}
	}
	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	/* remove from free list */
	q2h->q2h_free = q2e->q2e_next;

	/* initialize the new entry from the default limits */
	memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
	q2e->q2e_uid = ulfs_rw32(uid, needswap);
	/* insert in hash list */
	q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
	q2h->q2h_entries[uid & hash_mask] = ulfs_rw64(offset, needswap);
	if (hbp != bp) {
		bwrite(hbp);
	}
	bwrite(bp);
	return 0;
}
367
/*
 * Look up (and optionally allocate) the on-disk quota entries for all
 * quota types applying to ip, filling the caller-supplied arrays
 * (indexed by quota type; NULL where no entry applies).
 *
 * alloc  - if true, allocate an on-disk entry when none exists yet
 * modify - if true, read the entry buffers with B_MODIFY
 *
 * On success the dq_interlock of every applicable dquot is held and
 * the caller must release the interlocks and the buffers.
 *
 * NOTE(review): the early "return error" paths below come back with
 * the interlocks of the *other* dquots still held and earlier buffers
 * unreleased — presumably tolerated because these failures are rare
 * I/O errors; verify callers' expectations before relying on clean
 * error recovery here.
 */
static int
getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
    struct quota2_entry **q2ep)
{
	int error;
	int i;
	struct dquot *dq;
	struct ulfsmount *ump = ip->i_ump;
	u_int32_t ino_ids[ULFS_MAXQUOTAS];

	/* make sure the in-core dquots for this inode are attached */
	error = lfs_getinoquota(ip);
	if (error)
		return error;

	ino_ids[ULFS_USRQUOTA] = ip->i_uid;
	ino_ids[ULFS_GRPQUOTA] = ip->i_gid;
	/* first get the interlock for all dquot */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		mutex_enter(&dq->dq_interlock);
	}
	/* now get the corresponding quota entry */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		bpp[i] = NULL;
		q2ep[i] = NULL;
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		if (__predict_false(ump->um_quotas[i] == NULL)) {
			/*
			 * quotas have been turned off. This can happen
			 * at umount time.
			 */
			mutex_exit(&dq->dq_interlock);
			lfs_dqrele(NULLVP, dq);
			ip->i_dquot[i] = NULL;
			continue;
		}

		/* lblkno == 0 && blkoff == 0 means "no on-disk entry yet" */
		if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
			if (!alloc) {
				continue;
			}
			/* need to alloc a new on-disk quot */
			mutex_enter(&lfs_dqlock);
			error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
			mutex_exit(&lfs_dqlock);
			if (error)
				return error;
		}
		KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
		error = getq2e(ump, i, dq->dq2_lblkno,
		    dq->dq2_blkoff, &bpp[i], &q2ep[i],
		    modify ? B_MODIFY : 0);
		if (error)
			return error;
	}
	return 0;
}
429
430 __inline static int __unused
431 lfsquota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
432 {
433 return lfsquota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
434 q2v->q2v_hardlimit, q2v->q2v_time, now);
435 }
436
/*
 * Check and account a usage change of "change" units of type vtype
 * (QL_BLOCK or QL_FILE) against all quotas applying to ip.
 *
 * change == 0: no-op, just releases resources.
 * change < 0:  always allowed; usage is decremented (clamped at 0).
 * change > 0:  limits are enforced (unless FORCE or the credential is
 *              exempt via kauth); returns EDQUOT on refusal but still
 *              updates grace-period expiry where a limit was crossed.
 */
static int
quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
    int flags)
{
	int error;
	struct buf *bp[ULFS_MAXQUOTAS];
	struct quota2_entry *q2e[ULFS_MAXQUOTAS];
	struct quota2_val *q2vp;
	struct dquot *dq;
	uint64_t ncurblks;
	struct ulfsmount *ump = ip->i_ump;
	struct lfs *fs = ip->i_lfs;
	struct mount *mp = ump->um_mountp;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	int i;

	/* only allocate missing entries when adding usage */
	if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
		return error;
	if (change == 0) {
		/* nothing to account; drop the buffers and interlocks */
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			dq = ip->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			if (bp[i])
				brelse(bp[i], 0);
			mutex_exit(&dq->dq_interlock);
		}
		return 0;
	}
	if (change < 0) {
		/* freeing resources is always allowed */
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			dq = ip->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			if (q2e[i] == NULL) {
				mutex_exit(&dq->dq_interlock);
				continue;
			}
			q2vp = &q2e[i]->q2e_val[vtype];
			ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
			/* clamp at zero rather than wrapping the counter */
			if (ncurblks < -change)
				ncurblks = 0;
			else
				ncurblks += change;
			q2vp->q2v_cur = ulfs_rw64(ncurblks, needswap);
			quota2_bwrite(mp, bp[i]);
			mutex_exit(&dq->dq_interlock);
		}
		return 0;
	}
	/* see if the allocation is allowed */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		struct quota2_val q2v;
		int ql_stat;
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		KASSERT(q2e[i] != NULL);
		lfsquota2_ulfs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
		ql_stat = lfsquota2_check_limit(&q2v, change, time_second);

		/* the limit applies unless FORCE, or kauth grants an exemption */
		if ((flags & FORCE) == 0 &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
		    KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
			/* enforce this limit */
			switch(QL_STATUS(ql_stat)) {
			case QL_S_DENY_HARD:
				/* DQ_WARN keeps the message to one per transition */
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: write failed, %s %s "
					    "limit reached\n",
					    mp->mnt_stat.f_mntonname,
					    lfs_quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				error = EDQUOT;
				break;
			case QL_S_DENY_GRACE:
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: write failed, %s %s "
					    "limit reached\n",
					    mp->mnt_stat.f_mntonname,
					    lfs_quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				error = EDQUOT;
				break;
			case QL_S_ALLOW_SOFT:
				/* over soft limit: allowed, but warn once */
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: warning, %s %s "
					    "quota exceeded\n",
					    mp->mnt_stat.f_mntonname,
					    lfs_quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				break;
			}
		}
		/*
		 * always do this; we don't know if the allocation will
		 * succeed or not in the end. if we don't do the allocation
		 * q2v_time will be ignored anyway
		 */
		if (ql_stat & QL_F_CROSS) {
			/* soft limit just crossed: start the grace period */
			q2v.q2v_time = time_second + q2v.q2v_grace;
			lfsquota2_ulfs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
			    needswap);
		}
	}

	/* now do the allocation if allowed */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		KASSERT(q2e[i] != NULL);
		if (error == 0) {
			q2vp = &q2e[i]->q2e_val[vtype];
			ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
			q2vp->q2v_cur = ulfs_rw64(ncurblks + change, needswap);
			quota2_bwrite(mp, bp[i]);
		} else
			brelse(bp[i], 0);
		mutex_exit(&dq->dq_interlock);
	}
	return error;
}
564
565 int
566 lfs_chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
567 {
568 return quota2_check(ip, QL_BLOCK, change, cred, flags);
569 }
570
571 int
572 lfs_chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
573 {
574 return quota2_check(ip, QL_FILE, change, cred, flags);
575 }
576
/*
 * Implement quotactl "put": set the limits for (idtype, id, objtype).
 * The default id updates the default entry stored in the header; any
 * other id gets an on-disk entry allocated on demand.
 */
int
lfsquota2_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
    const struct quotaval *val)
{
	int error;
	struct dquot *dq;
	struct quota2_header *q2h;
	struct quota2_entry q2e, *q2ep;
	struct buf *bp;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	/* make sure we can index by the fs-independent idtype */
	CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
	CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);

	if (ump->um_quotas[key->qk_idtype] == NULLVP)
		return ENODEV;

	if (key->qk_id == QUOTA_DEFAULTID) {
		/* read-modify-write the default entry in the header */
		mutex_enter(&lfs_dqlock);
		error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
		if (error) {
			mutex_exit(&lfs_dqlock);
			goto out_wapbl;
		}
		lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
		quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
		lfsquota2_ulfs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
		mutex_exit(&lfs_dqlock);
		quota2_bwrite(ump->um_mountp, bp);
		goto out_wapbl;
	}

	error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
	if (error)
		goto out_wapbl;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* need to alloc a new on-disk quot */
		mutex_enter(&lfs_dqlock);
		error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
		mutex_exit(&lfs_dqlock);
		if (error)
			goto out_il;
	}
	KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
	error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
	    dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
	if (error)
		goto out_il;

	/* read-modify-write the entry with the new limits */
	lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
	quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
	lfsquota2_ulfs_rwq2e(&q2e, q2ep, needswap);
	quota2_bwrite(ump->um_mountp, bp);

out_il:
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
out_wapbl:	/* label name kept from the wapbl-journaled ufs original */
	return error;
}
641
/* Context handed to dq2clear_callback() through quota2_walk_list(). */
struct dq2clear_callback {
	uid_t id;			/* id whose entry is being freed */
	struct dquot *dq;		/* in-core dquot for that id */
	struct quota2_header *q2h;	/* header, for the free list */
};
647
/*
 * quota2_walk_list() callback: find the entry for c->id on its hash
 * chain, unlink it, and put it on the free list.  Returns Q2WL_ABORT
 * to stop the walk once the entry has been freed.
 */
static int
dq2clear_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
    uint64_t off, void *v)
{
	struct dq2clear_callback *c = v;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	uint64_t myoff;

	if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
		KASSERT(mutex_owned(&c->dq->dq_interlock));
		/* detach the in-core dquot from the on-disk entry */
		c->dq->dq2_lblkno = 0;
		c->dq->dq2_blkoff = 0;
		/* myoff is already in on-disk byte order */
		myoff = *offp;
		/* remove from hash list */
		*offp = q2e->q2e_next;
		/* add to free list */
		q2e->q2e_next = c->q2h->q2h_free;
		c->q2h->q2h_free = myoff;
		return Q2WL_ABORT;
	}
	return 0;
}
/*
 * Implement quotactl "del": reset the limits for (idtype, id, objtype)
 * to the defaults and, if the entry then carries no information at
 * all, unlink it from its hash chain and return it to the free list.
 */
int
lfsquota2_handle_cmd_del(struct ulfsmount *ump, const struct quotakey *qk)
{
	int idtype;
	id_t id;
	int objtype;
	int error, i, canfree;
	struct dquot *dq;
	struct quota2_header *q2h;
	struct quota2_entry q2e, *q2ep;
	struct buf *hbp, *bp;
	u_long hash_mask;
	struct dq2clear_callback c;

	idtype = qk->qk_idtype;
	id = qk->qk_id;
	objtype = qk->qk_objtype;

	if (ump->um_quotas[idtype] == NULLVP)
		return ENODEV;
	if (id == QUOTA_DEFAULTID)
		return EOPNOTSUPP;

	/* get the default entry before locking the entry's buffer */
	mutex_enter(&lfs_dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error) {
		mutex_exit(&lfs_dqlock);
		return error;
	}
	/* we'll copy to another disk entry, so no need to swap */
	memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
	mutex_exit(&lfs_dqlock);
	brelse(hbp, 0);

	error = lfs_dqget(NULLVP, id, ump, idtype, &dq);
	if (error)
		return error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* already clear, nothing to do */
		error = ENOENT;
		goto out_il;
	}

	error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
	    &bp, &q2ep, B_MODIFY);
	if (error)
		goto out_wapbl;

	/* make sure we can index by the objtype passed in */
	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

	/* clear the requested objtype by copying from the default entry */
	q2ep->q2e_val[objtype].q2v_softlimit =
	    q2e.q2e_val[objtype].q2v_softlimit;
	q2ep->q2e_val[objtype].q2v_hardlimit =
	    q2e.q2e_val[objtype].q2v_hardlimit;
	q2ep->q2e_val[objtype].q2v_grace =
	    q2e.q2e_val[objtype].q2v_grace;
	q2ep->q2e_val[objtype].q2v_time = 0;

	/* if this entry now contains no information, we can free it */
	canfree = 1;
	for (i = 0; i < N_QL; i++) {
		if (q2ep->q2e_val[i].q2v_cur != 0 ||
		    (q2ep->q2e_val[i].q2v_softlimit !=
		     q2e.q2e_val[i].q2v_softlimit) ||
		    (q2ep->q2e_val[i].q2v_hardlimit !=
		     q2e.q2e_val[i].q2v_hardlimit) ||
		    (q2ep->q2e_val[i].q2v_grace !=
		     q2e.q2e_val[i].q2v_grace)) {
			canfree = 0;
			break;
		}
		/* note: do not need to check q2v_time */
	}

	if (canfree == 0) {
		quota2_bwrite(ump->um_mountp, bp);
		goto out_wapbl;
	}
	/* we can free it. release bp so we can walk the list */
	brelse(bp, 0);
	mutex_enter(&lfs_dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error)
		goto out_dqlock;

	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	c.dq = dq;
	c.id = id;
	c.q2h = q2h;
	/* the callback unlinks the entry and moves it to the free list */
	error = quota2_walk_list(ump, hbp, idtype,
	    &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
	    dq2clear_callback);

	bwrite(hbp);

out_dqlock:
	mutex_exit(&lfs_dqlock);
out_wapbl:	/* label name kept from the wapbl-journaled ufs original */
out_il:
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
	return error;
}
780
781 static int
782 quota2_fetch_q2e(struct ulfsmount *ump, const struct quotakey *qk,
783 struct quota2_entry *ret)
784 {
785 struct dquot *dq;
786 int error;
787 struct quota2_entry *q2ep;
788 struct buf *bp;
789 struct lfs *fs = ump->um_lfs;
790 const int needswap = ULFS_MPNEEDSWAP(fs);
791
792 error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
793 if (error)
794 return error;
795
796 mutex_enter(&dq->dq_interlock);
797 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
798 mutex_exit(&dq->dq_interlock);
799 lfs_dqrele(NULLVP, dq);
800 return ENOENT;
801 }
802 error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
803 &bp, &q2ep, 0);
804 if (error) {
805 mutex_exit(&dq->dq_interlock);
806 lfs_dqrele(NULLVP, dq);
807 return error;
808 }
809 lfsquota2_ulfs_rwq2e(q2ep, ret, needswap);
810 brelse(bp, 0);
811 mutex_exit(&dq->dq_interlock);
812 lfs_dqrele(NULLVP, dq);
813
814 return 0;
815 }
816
817 static int
818 quota2_fetch_quotaval(struct ulfsmount *ump, const struct quotakey *qk,
819 struct quotaval *ret)
820 {
821 struct dquot *dq;
822 int error;
823 struct quota2_entry *q2ep, q2e;
824 struct buf *bp;
825 struct lfs *fs = ump->um_lfs;
826 const int needswap = ULFS_MPNEEDSWAP(fs);
827 id_t id2;
828
829 error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
830 if (error)
831 return error;
832
833 mutex_enter(&dq->dq_interlock);
834 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
835 mutex_exit(&dq->dq_interlock);
836 lfs_dqrele(NULLVP, dq);
837 return ENOENT;
838 }
839 error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
840 &bp, &q2ep, 0);
841 if (error) {
842 mutex_exit(&dq->dq_interlock);
843 lfs_dqrele(NULLVP, dq);
844 return error;
845 }
846 lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
847 brelse(bp, 0);
848 mutex_exit(&dq->dq_interlock);
849 lfs_dqrele(NULLVP, dq);
850
851 q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
852 KASSERT(id2 == qk->qk_id);
853 return 0;
854 }
855
/*
 * Implement quotactl "get": fetch the quotaval for (idtype, id,
 * objtype).  The default id reads the default entry in the header;
 * other ids are looked up through their dquot.
 */
int
lfsquota2_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
    struct quotaval *qv)
{
	int error;
	struct quota2_header *q2h;
	struct quota2_entry q2e;
	struct buf *bp;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	id_t id2;

	/*
	 * Make sure the FS-independent codes match the internal ones,
	 * so we can use the passed-in objtype without having to
	 * convert it explicitly to QL_BLOCK/QL_FILE.
	 */
	CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
	CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
	CTASSERT(N_QL == 2);

	if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
		return EINVAL;
	}

	if (ump->um_quotas[qk->qk_idtype] == NULLVP)
		return ENODEV;
	if (qk->qk_id == QUOTA_DEFAULTID) {
		mutex_enter(&lfs_dqlock);
		error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
		if (error) {
			mutex_exit(&lfs_dqlock);
			return error;
		}
		/* snapshot the default entry before dropping the lock */
		lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
		mutex_exit(&lfs_dqlock);
		brelse(bp, 0);
		/* def flag is always true on this branch */
		q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
		    qk->qk_objtype, qv);
		(void)id2;
	} else
		error = quota2_fetch_quotaval(ump, qk, qv);

	return error;
}
901
/*
 * Cursor structure we use.
 *
 * This will get stored in userland between calls so we must not assume
 * it isn't arbitrarily corrupted; q2cursor_check() validates it on the
 * way back in.
 */
struct ulfsq2_cursor {
	uint32_t q2c_magic;	/* magic number (Q2C_MAGIC) */
	int q2c_hashsize;	/* size of hash table at last go */

	int q2c_users_done;	/* true if we've returned all user data */
	int q2c_groups_done;	/* true if we've returned all group data */
	int q2c_defaults_done;	/* true if we've returned the default values */
	int q2c_hashpos;	/* slot to start at in hash table */
	int q2c_uidpos;		/* number of ids we've handled */
	int q2c_blocks_done;	/* true if we've returned the blocks value */
};
919
/*
 * State of a single cursorget call, or at least the part of it that
 * needs to be passed around.
 */
struct q2cursor_state {
	/* data return pointers */
	struct quotakey *keys;
	struct quotaval *vals;

	/* key/value counters */
	unsigned maxkeyvals;	/* capacity of keys[]/vals[] */
	unsigned numkeys;	/* number of keys assigned */

	/* ID to key/value conversion state */
	int skipfirst;		/* if true skip first key/value */
	int skiplast;		/* if true skip last key/value */

	/* ID counters */
	unsigned maxids;	/* maximum number of IDs to handle */
	unsigned numids;	/* number of IDs handled */
};
941
/*
 * Additional structure for getids callback.
 */
struct q2cursor_getids {
	struct q2cursor_state *state;	/* shared cursorget state */
	int idtype;		/* idtype of the chain being walked */
	unsigned skip;		/* number of ids to skip over */
	unsigned new_skip;	/* number of ids to skip over next time */
	unsigned skipped;	/* number skipped so far */
	int stopped;		/* true if we stopped quota_walk_list early */
};
953
/*
 * Cursor-related functions
 */

/* magic number (guards against userland handing back garbage) */
#define Q2C_MAGIC (0xbeebe111)

/* extract cursor from caller form */
#define Q2CURSOR(qkc) ((struct ulfsq2_cursor *)&qkc->u.qkc_space[0])
963
964 /*
965 * Check that a cursor we're handed is something like valid. If
966 * someone munges it and it still passes these checks, they'll get
967 * partial or odd results back but won't break anything.
968 */
969 static int
970 q2cursor_check(struct ulfsq2_cursor *cursor)
971 {
972 if (cursor->q2c_magic != Q2C_MAGIC) {
973 return EINVAL;
974 }
975 if (cursor->q2c_hashsize < 0) {
976 return EINVAL;
977 }
978
979 if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
980 return EINVAL;
981 }
982 if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
983 return EINVAL;
984 }
985 if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
986 return EINVAL;
987 }
988 if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
989 return EINVAL;
990 }
991 if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
992 return EINVAL;
993 }
994 return 0;
995 }
996
997 /*
998 * Set up the q2cursor state.
999 */
1000 static void
1001 q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
1002 struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
1003 {
1004 state->keys = keys;
1005 state->vals = vals;
1006
1007 state->maxkeyvals = maxkeyvals;
1008 state->numkeys = 0;
1009
1010 /*
1011 * For each ID there are two quotavals to return. If the
1012 * maximum number of entries to return is odd, we might want
1013 * to skip the first quotaval of the first ID, or the last
1014 * quotaval of the last ID, but not both. So the number of IDs
1015 * we want is (up to) half the number of return slots we have,
1016 * rounded up.
1017 */
1018
1019 state->maxids = (state->maxkeyvals + 1) / 2;
1020 state->numids = 0;
1021 if (state->maxkeyvals % 2) {
1022 if (blocks_done) {
1023 state->skipfirst = 1;
1024 state->skiplast = 0;
1025 } else {
1026 state->skipfirst = 0;
1027 state->skiplast = 1;
1028 }
1029 } else {
1030 state->skipfirst = 0;
1031 state->skiplast = 0;
1032 }
1033 }
1034
1035 /*
1036 * Choose which idtype we're going to work on. If doing a full
1037 * iteration, we do users first, then groups, but either might be
1038 * disabled or marked to skip via cursorsetidtype(), so don't make
1039 * silly assumptions.
1040 */
1041 static int
1042 q2cursor_pickidtype(struct ulfsq2_cursor *cursor, int *idtype_ret)
1043 {
1044 if (cursor->q2c_users_done == 0) {
1045 *idtype_ret = QUOTA_IDTYPE_USER;
1046 } else if (cursor->q2c_groups_done == 0) {
1047 *idtype_ret = QUOTA_IDTYPE_GROUP;
1048 } else {
1049 return EAGAIN;
1050 }
1051 return 0;
1052 }
1053
1054 /*
1055 * Add an ID to the current state. Sets up either one or two keys to
1056 * refer to it, depending on whether it's first/last and the setting
1057 * of skipfirst. (skiplast does not need to be explicitly tested)
1058 */
1059 static void
1060 q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
1061 {
1062 KASSERT(state->numids < state->maxids);
1063 KASSERT(state->numkeys < state->maxkeyvals);
1064
1065 if (!state->skipfirst || state->numkeys > 0) {
1066 state->keys[state->numkeys].qk_idtype = idtype;
1067 state->keys[state->numkeys].qk_id = id;
1068 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
1069 state->numkeys++;
1070 }
1071 if (state->numkeys < state->maxkeyvals) {
1072 state->keys[state->numkeys].qk_idtype = idtype;
1073 state->keys[state->numkeys].qk_id = id;
1074 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
1075 state->numkeys++;
1076 } else {
1077 KASSERT(state->skiplast);
1078 }
1079 state->numids++;
1080 }
1081
/*
 * Callback function for getting IDs. Update counting and call addid.
 * Skips over entries already returned on a previous cursorget call
 * and aborts the list walk once enough IDs have been collected.
 */
static int
q2cursor_getids_callback(struct ulfsmount *ump, uint64_t *offp,
    struct quota2_entry *q2ep, uint64_t off, void *v)
{
	struct q2cursor_getids *gi = v;
	id_t id;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	if (gi->skipped < gi->skip) {
		/* still skipping ids handed out last time */
		gi->skipped++;
		return 0;
	}
	id = ulfs_rw32(q2ep->q2e_uid, needswap);
	q2cursor_addid(gi->state, gi->idtype, id);
	/* remember how far we got for the next resume */
	gi->new_skip++;
	if (gi->state->numids >= gi->state->maxids) {
		/* got enough ids, stop now */
		gi->stopped = 1;
		return Q2WL_ABORT;
	}
	return 0;
}
1108
1109 /*
1110 * Fill in a batch of quotakeys by scanning one or more hash chains.
1111 */
static int
q2cursor_getkeys(struct ulfsmount *ump, int idtype, struct ulfsq2_cursor *cursor,
    struct q2cursor_state *state,
    int *hashsize_ret, struct quota2_entry *default_q2e_ret)
{
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	struct buf *hbp;
	struct quota2_header *q2h;
	int quota2_hash_size;
	struct q2cursor_getids gi;
	uint64_t offset;
	int error;

	/*
	 * Read the header block.
	 */

	/* lfs_dqlock is held across the whole scan so the hash table
	 * cannot be resized under us. */
	mutex_enter(&lfs_dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error) {
		mutex_exit(&lfs_dqlock);
		return error;
	}

	/* if the table size has changed, make the caller start over */
	quota2_hash_size = ulfs_rw16(q2h->q2h_hash_size, needswap);
	if (cursor->q2c_hashsize == 0) {
		/* first call for this idtype: latch the size */
		cursor->q2c_hashsize = quota2_hash_size;
	} else if (cursor->q2c_hashsize != quota2_hash_size) {
		/* EDEADLK is used as the "restart iteration" code */
		error = EDEADLK;
		goto scanfail;
	}

	/* grab the entry with the default values out of the header */
	lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);

	/* If we haven't done the defaults yet, that goes first. */
	if (cursor->q2c_defaults_done == 0) {
		q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
		/* if we read both halves, mark it done */
		if (state->numids < state->maxids || !state->skiplast) {
			cursor->q2c_defaults_done = 1;
		}
	}

	gi.state = state;
	gi.idtype = idtype;

	/*
	 * Walk hash chains until the batch fills up or the chains run
	 * out.  The cursor records which chain we are on (q2c_hashpos)
	 * and how far into it we got (q2c_uidpos).
	 */
	while (state->numids < state->maxids) {
		if (cursor->q2c_hashpos >= quota2_hash_size) {
			/* nothing more left */
			break;
		}

		/* scan this hash chain */
		gi.skip = cursor->q2c_uidpos;
		gi.new_skip = gi.skip;
		gi.skipped = 0;
		gi.stopped = 0;
		offset = q2h->q2h_entries[cursor->q2c_hashpos];

		error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
		    q2cursor_getids_callback);
		/* the walker consumes the callback's Q2WL_ABORT (asserted) */
		KASSERT(error != Q2WL_ABORT);
		if (error) {
			break;
		}
		if (gi.stopped) {
			/* callback stopped before reading whole chain */
			cursor->q2c_uidpos = gi.new_skip;
			/* if we didn't get both halves, back up */
			if (state->numids == state->maxids && state->skiplast){
				KASSERT(cursor->q2c_uidpos > 0);
				cursor->q2c_uidpos--;
			}
		} else {
			/* read whole chain */
			/* if we got both halves of the last id, advance */
			if (state->numids < state->maxids || !state->skiplast){
				cursor->q2c_uidpos = 0;
				cursor->q2c_hashpos++;
			}
		}
	}

scanfail:
	/* drop the lock before brelse; hbp is released in both cases */
	mutex_exit(&lfs_dqlock);
	brelse(hbp, 0);
	if (error)
		return error;

	*hashsize_ret = quota2_hash_size;
	return 0;
}
1207
1208 /*
1209 * Fetch the quotavals for the quotakeys.
1210 */
1211 static int
1212 q2cursor_getvals(struct ulfsmount *ump, struct q2cursor_state *state,
1213 const struct quota2_entry *default_q2e)
1214 {
1215 int hasid;
1216 id_t loadedid, id;
1217 unsigned pos;
1218 struct quota2_entry q2e;
1219 int objtype;
1220 int error;
1221
1222 hasid = 0;
1223 loadedid = 0;
1224 for (pos = 0; pos < state->numkeys; pos++) {
1225 id = state->keys[pos].qk_id;
1226 if (!hasid || id != loadedid) {
1227 hasid = 1;
1228 loadedid = id;
1229 if (id == QUOTA_DEFAULTID) {
1230 q2e = *default_q2e;
1231 } else {
1232 error = quota2_fetch_q2e(ump,
1233 &state->keys[pos],
1234 &q2e);
1235 if (error == ENOENT) {
1236 /* something changed - start over */
1237 error = EDEADLK;
1238 }
1239 if (error) {
1240 return error;
1241 }
1242 }
1243 }
1244
1245
1246 objtype = state->keys[pos].qk_objtype;
1247 KASSERT(objtype >= 0 && objtype < N_QL);
1248 q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
1249 }
1250
1251 return 0;
1252 }
1253
1254 /*
1255 * Handle cursorget.
1256 *
1257 * We can't just read keys and values directly, because we can't walk
1258 * the list with qdlock and grab dq_interlock to read the entries at
1259 * the same time. So we're going to do two passes: one to figure out
1260 * which IDs we want and fill in the keys, and then a second to use
1261 * the keys to fetch the values.
1262 */
1263 int
1264 lfsquota2_handle_cmd_cursorget(struct ulfsmount *ump, struct quotakcursor *qkc,
1265 struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
1266 unsigned *ret)
1267 {
1268 int error;
1269 struct ulfsq2_cursor *cursor;
1270 struct ulfsq2_cursor newcursor;
1271 struct q2cursor_state state;
1272 struct quota2_entry default_q2e;
1273 int idtype;
1274 int quota2_hash_size = 0; /* XXXuninit */
1275
1276 /*
1277 * Convert and validate the cursor.
1278 */
1279 cursor = Q2CURSOR(qkc);
1280 error = q2cursor_check(cursor);
1281 if (error) {
1282 return error;
1283 }
1284
1285 /*
1286 * Make sure our on-disk codes match the values of the
1287 * FS-independent ones. This avoids the need for explicit
1288 * conversion (which would be a NOP anyway and thus easily
1289 * left out or called in the wrong places...)
1290 */
1291 CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
1292 CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
1293 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
1294 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
1295
1296 /*
1297 * If some of the idtypes aren't configured/enabled, arrange
1298 * to skip over them.
1299 */
1300 if (cursor->q2c_users_done == 0 &&
1301 ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1302 cursor->q2c_users_done = 1;
1303 }
1304 if (cursor->q2c_groups_done == 0 &&
1305 ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1306 cursor->q2c_groups_done = 1;
1307 }
1308
1309 /* Loop over, potentially, both idtypes */
1310 while (1) {
1311
1312 /* Choose id type */
1313 error = q2cursor_pickidtype(cursor, &idtype);
1314 if (error == EAGAIN) {
1315 /* nothing more to do, return 0 */
1316 *ret = 0;
1317 return 0;
1318 }
1319 KASSERT(ump->um_quotas[idtype] != NULLVP);
1320
1321 /*
1322 * Initialize the per-call iteration state. Copy the
1323 * cursor state so we can update it in place but back
1324 * out on error.
1325 */
1326 q2cursor_initstate(&state, keys, vals, maxreturn,
1327 cursor->q2c_blocks_done);
1328 newcursor = *cursor;
1329
1330 /* Assign keys */
1331 error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
1332 "a2_hash_size, &default_q2e);
1333 if (error) {
1334 return error;
1335 }
1336
1337 /* Now fill in the values. */
1338 error = q2cursor_getvals(ump, &state, &default_q2e);
1339 if (error) {
1340 return error;
1341 }
1342
1343 /*
1344 * Now that we aren't going to fail and lose what we
1345 * did so far, we can update the cursor state.
1346 */
1347
1348 if (newcursor.q2c_hashpos >= quota2_hash_size) {
1349 if (idtype == QUOTA_IDTYPE_USER)
1350 cursor->q2c_users_done = 1;
1351 else
1352 cursor->q2c_groups_done = 1;
1353
1354 /* start over on another id type */
1355 cursor->q2c_hashsize = 0;
1356 cursor->q2c_defaults_done = 0;
1357 cursor->q2c_hashpos = 0;
1358 cursor->q2c_uidpos = 0;
1359 cursor->q2c_blocks_done = 0;
1360 } else {
1361 *cursor = newcursor;
1362 cursor->q2c_blocks_done = state.skiplast;
1363 }
1364
1365 /*
1366 * If we have something to return, return it.
1367 * Otherwise, continue to the other idtype, if any,
1368 * and only return zero at end of iteration.
1369 */
1370 if (state.numkeys > 0) {
1371 break;
1372 }
1373 }
1374
1375 *ret = state.numkeys;
1376 return 0;
1377 }
1378
1379 int
1380 lfsquota2_handle_cmd_cursoropen(struct ulfsmount *ump, struct quotakcursor *qkc)
1381 {
1382 struct ulfsq2_cursor *cursor;
1383
1384 CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
1385 cursor = Q2CURSOR(qkc);
1386
1387 cursor->q2c_magic = Q2C_MAGIC;
1388 cursor->q2c_hashsize = 0;
1389
1390 cursor->q2c_users_done = 0;
1391 cursor->q2c_groups_done = 0;
1392 cursor->q2c_defaults_done = 0;
1393 cursor->q2c_hashpos = 0;
1394 cursor->q2c_uidpos = 0;
1395 cursor->q2c_blocks_done = 0;
1396 return 0;
1397 }
1398
int
lfsquota2_handle_cmd_cursorclose(struct ulfsmount *ump, struct quotakcursor *qkc)
{
	struct ulfsq2_cursor *cursor;

	/*
	 * No per-cursor resources are allocated, so closing only has
	 * to validate the cursor's magic number.
	 */
	cursor = Q2CURSOR(qkc);
	return q2cursor_check(cursor);
}
1415
1416 int
1417 lfsquota2_handle_cmd_cursorskipidtype(struct ulfsmount *ump,
1418 struct quotakcursor *qkc, int idtype)
1419 {
1420 struct ulfsq2_cursor *cursor;
1421 int error;
1422
1423 cursor = Q2CURSOR(qkc);
1424 error = q2cursor_check(cursor);
1425 if (error) {
1426 return error;
1427 }
1428
1429 switch (idtype) {
1430 case QUOTA_IDTYPE_USER:
1431 cursor->q2c_users_done = 1;
1432 break;
1433 case QUOTA_IDTYPE_GROUP:
1434 cursor->q2c_groups_done = 1;
1435 break;
1436 default:
1437 return EINVAL;
1438 }
1439
1440 return 0;
1441 }
1442
1443 int
1444 lfsquota2_handle_cmd_cursoratend(struct ulfsmount *ump, struct quotakcursor *qkc,
1445 int *ret)
1446 {
1447 struct ulfsq2_cursor *cursor;
1448 int error;
1449
1450 cursor = Q2CURSOR(qkc);
1451 error = q2cursor_check(cursor);
1452 if (error) {
1453 return error;
1454 }
1455
1456 *ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
1457 return 0;
1458 }
1459
1460 int
1461 lfsquota2_handle_cmd_cursorrewind(struct ulfsmount *ump, struct quotakcursor *qkc)
1462 {
1463 struct ulfsq2_cursor *cursor;
1464 int error;
1465
1466 cursor = Q2CURSOR(qkc);
1467 error = q2cursor_check(cursor);
1468 if (error) {
1469 return error;
1470 }
1471
1472 cursor->q2c_hashsize = 0;
1473
1474 cursor->q2c_users_done = 0;
1475 cursor->q2c_groups_done = 0;
1476 cursor->q2c_defaults_done = 0;
1477 cursor->q2c_hashpos = 0;
1478 cursor->q2c_uidpos = 0;
1479 cursor->q2c_blocks_done = 0;
1480
1481 return 0;
1482 }
1483
/*
 * Filesystem-level quota sync hook; a no-op for quota2 on LFS.
 */
int
lfs_q2sync(struct mount *mp)
{
	return 0;
}
1489
/* Argument bundle passed through quota2_walk_list to dq2get_callback(). */
struct dq2get_callback {
	uid_t id;		/* id whose entry we are looking for */
	struct dquot *dq;	/* dquot whose location fields get filled in */
};
1494
1495 static int
1496 dq2get_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
1497 uint64_t off, void *v)
1498 {
1499 struct dq2get_callback *c = v;
1500 daddr_t lblkno;
1501 int blkoff;
1502 struct lfs *fs = ump->um_lfs;
1503 const int needswap = ULFS_MPNEEDSWAP(fs);
1504
1505 if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
1506 KASSERT(mutex_owned(&c->dq->dq_interlock));
1507 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
1508 blkoff = (off & ump->umq2_bmask);
1509 c->dq->dq2_lblkno = lblkno;
1510 c->dq->dq2_blkoff = blkoff;
1511 return Q2WL_ABORT;
1512 }
1513 return 0;
1514 }
1515
1516 int
1517 lfs_dq2get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
1518 struct dquot *dq)
1519 {
1520 struct buf *bp;
1521 struct quota2_header *q2h;
1522 int error;
1523 daddr_t offset;
1524 u_long hash_mask;
1525 struct dq2get_callback c = {
1526 .id = id,
1527 .dq = dq
1528 };
1529
1530 KASSERT(mutex_owned(&dq->dq_interlock));
1531 mutex_enter(&lfs_dqlock);
1532 error = getq2h(ump, type, &bp, &q2h, 0);
1533 if (error)
1534 goto out_mutex;
1535 /* look for our entry */
1536 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
1537 offset = q2h->q2h_entries[id & hash_mask];
1538 error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
1539 dq2get_callback);
1540 brelse(bp, 0);
1541 out_mutex:
1542 mutex_exit(&lfs_dqlock);
1543 return error;
1544 }
1545
/*
 * Per-dquot sync hook; a no-op for quota2 on LFS.
 */
int
lfs_dq2sync(struct vnode *vp, struct dquot *dq)
{
	return 0;
}
1551
1552 int
1553 lfs_quota2_mount(struct mount *mp)
1554 {
1555 struct ulfsmount *ump = VFSTOULFS(mp);
1556 struct lfs *fs = ump->um_lfs;
1557 int error = 0;
1558 struct vnode *vp;
1559 struct lwp *l = curlwp;
1560
1561 if ((fs->lfs_use_quota2) == 0)
1562 return 0;
1563
1564 fs->um_flags |= ULFS_QUOTA2;
1565 ump->umq2_bsize = fs->lfs_bsize;
1566 ump->umq2_bmask = fs->lfs_bmask;
1567 if (fs->lfs_quota_magic != Q2_HEAD_MAGIC) {
1568 printf("%s: Invalid quota magic number\n",
1569 mp->mnt_stat.f_mntonname);
1570 return EINVAL;
1571 }
1572 if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA)) &&
1573 fs->lfs_quotaino[ULFS_USRQUOTA] == 0) {
1574 printf("%s: no user quota inode\n",
1575 mp->mnt_stat.f_mntonname);
1576 error = EINVAL;
1577 }
1578 if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA)) &&
1579 fs->lfs_quotaino[ULFS_GRPQUOTA] == 0) {
1580 printf("%s: no group quota inode\n",
1581 mp->mnt_stat.f_mntonname);
1582 error = EINVAL;
1583 }
1584 if (error)
1585 return error;
1586
1587 if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA) &&
1588 ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1589 error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_USRQUOTA], &vp);
1590 if (error) {
1591 printf("%s: can't vget() user quota inode: %d\n",
1592 mp->mnt_stat.f_mntonname, error);
1593 return error;
1594 }
1595 ump->um_quotas[ULFS_USRQUOTA] = vp;
1596 ump->um_cred[ULFS_USRQUOTA] = l->l_cred;
1597 mutex_enter(vp->v_interlock);
1598 vp->v_writecount++;
1599 mutex_exit(vp->v_interlock);
1600 VOP_UNLOCK(vp);
1601 }
1602 if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA) &&
1603 ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1604 error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_GRPQUOTA], &vp);
1605 if (error) {
1606 vn_close(ump->um_quotas[ULFS_USRQUOTA],
1607 FREAD|FWRITE, l->l_cred);
1608 printf("%s: can't vget() group quota inode: %d\n",
1609 mp->mnt_stat.f_mntonname, error);
1610 return error;
1611 }
1612 ump->um_quotas[ULFS_GRPQUOTA] = vp;
1613 ump->um_cred[ULFS_GRPQUOTA] = l->l_cred;
1614 mutex_enter(vp->v_interlock);
1615 vp->v_vflag |= VV_SYSTEM;
1616 vp->v_writecount++;
1617 mutex_exit(vp->v_interlock);
1618 VOP_UNLOCK(vp);
1619 }
1620 mp->mnt_flag |= MNT_QUOTA;
1621 return 0;
1622 }
1623