/* $NetBSD: ulfs_quota2.c,v 1.1 2013/06/06 00:40:55 dholland Exp $ */
/* from NetBSD: ufs_quota2.c,v 1.35 2012/09/27 07:47:56 bouyer Exp */

/*-
 * Copyright (c) 2010 Manuel Bouyer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.1 2013/06/06 00:40:55 dholland Exp $");

#include <sys/buf.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fstrans.h>
#include <sys/kauth.h>
#include <sys/wapbl.h>
#include <sys/quota.h>
#include <sys/quotactl.h>

#include <ufs/ufs/quota2.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_quota.h>
#include <ufs/ufs/ufs_wapbl.h>

/*
 * LOCKING:
 * Data in the entries are protected by the associated struct dquot's
 * dq_interlock (this means we can't read or change a quota entry without
 * grabbing a dquot for it).
 * The header and lists (including pointers in the data entries, and q2e_uid)
 * are protected by the global dqlock.
 * The locking order is dq_interlock -> dqlock.
 */
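
/*
 * A minimal sketch of the resulting pattern, as used below by
 * getinoquota2() and quota2_handle_cmd_put() when an on-disk entry has
 * to be allocated (error handling omitted): take the entry's
 * dq_interlock first, then dqlock for the header and lists:
 *
 *        mutex_enter(&dq->dq_interlock);
 *        mutex_enter(&dqlock);
 *        error = quota2_q2ealloc(ump, type, id, dq);
 *        mutex_exit(&dqlock);
 *        ...
 *        mutex_exit(&dq->dq_interlock);
 *
 * Taking dqlock first and then a dq_interlock would invert this order.
 */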

static int quota2_bwrite(struct mount *, struct buf *);
static int getinoquota2(struct inode *, bool, bool, struct buf **,
    struct quota2_entry **);
static int getq2h(struct ufsmount *, int, struct buf **,
    struct quota2_header **, int);
static int getq2e(struct ufsmount *, int, daddr_t, int, struct buf **,
    struct quota2_entry **, int);
static int quota2_walk_list(struct ufsmount *, struct buf *, int,
    uint64_t *, int, void *,
    int (*func)(struct ufsmount *, uint64_t *, struct quota2_entry *,
      uint64_t, void *));

static const char *limnames[] = INITQLNAMES;

static void
quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
    struct quota2_entry *q2e)
{
        /* make sure we can index q2e_val[] by the fs-independent objtype */
        CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
        CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

        q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
        q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
        q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
}

/*
 * Convert internal representation to FS-independent representation.
 * (Note that while the two types are currently identical, the
 * internal representation is an on-disk struct and the FS-independent
 * representation is not, and they might diverge in the future.)
 */
static void
q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
{
        qv->qv_softlimit = q2v->q2v_softlimit;
        qv->qv_hardlimit = q2v->q2v_hardlimit;
        qv->qv_usage = q2v->q2v_cur;
        qv->qv_expiretime = q2v->q2v_time;
        qv->qv_grace = q2v->q2v_grace;
}

/*
 * Convert a quota2entry and default-flag to the FS-independent
 * representation.
 */
static void
q2e_to_quotaval(struct quota2_entry *q2e, int def,
    id_t *id, int objtype, struct quotaval *ret)
{
        if (def) {
                *id = QUOTA_DEFAULTID;
        } else {
                *id = q2e->q2e_uid;
        }

        KASSERT(objtype >= 0 && objtype < N_QL);
        q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
}


static int
quota2_bwrite(struct mount *mp, struct buf *bp)
{
        if (mp->mnt_flag & MNT_SYNCHRONOUS)
                return bwrite(bp);
        else {
                bdwrite(bp);
                return 0;
        }
}

static int
getq2h(struct ufsmount *ump, int type,
    struct buf **bpp, struct quota2_header **q2hp, int flags)
{
#ifdef FFS_EI
        const int needswap = UFS_MPNEEDSWAP(ump);
#endif
        int error;
        struct buf *bp;
        struct quota2_header *q2h;

        KASSERT(mutex_owned(&dqlock));
        error = bread(ump->um_quotas[type], 0, ump->umq2_bsize,
            ump->um_cred[type], flags, &bp);
        if (error)
                return error;
        if (bp->b_resid != 0)
                panic("dq2get: %s quota file truncated", quotatypes[type]);

        q2h = (void *)bp->b_data;
        if (ufs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
            q2h->q2h_type != type)
                panic("dq2get: corrupted %s quota header", quotatypes[type]);
        *bpp = bp;
        *q2hp = q2h;
        return 0;
}

static int
getq2e(struct ufsmount *ump, int type, daddr_t lblkno, int blkoffset,
    struct buf **bpp, struct quota2_entry **q2ep, int flags)
{
        int error;
        struct buf *bp;

        if (blkoffset & (sizeof(uint64_t) - 1)) {
                panic("dq2get: %s quota file corrupted",
                    quotatypes[type]);
        }
        error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize,
            ump->um_cred[type], flags, &bp);
        if (error)
                return error;
        if (bp->b_resid != 0) {
                panic("dq2get: %s quota file corrupted",
                    quotatypes[type]);
        }
        *q2ep = (void *)((char *)bp->b_data + blkoffset);
        *bpp = bp;
        return 0;
}

/* walk a quota entry list, calling the callback for each entry */
#define Q2WL_ABORT 0x10000000
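
/*
 * The callback passed to quota2_walk_list() is invoked once per entry,
 * with a pointer to the link field that led to the entry (*offp, still
 * in on-disk byte order) and the entry's own offset (off, in host byte
 * order).  Returning 0 continues the walk, returning an errno stops it,
 * and returning Q2WL_ABORT stops it without reporting an error.  The
 * callback may also rewrite *offp (as dq2clear_callback() does) to
 * unlink the current entry; the walker notices the change and re-reads
 * the chain from the new value.  A minimal sketch, modeled on
 * dq2get_callback() below ("wanted_id" is just an illustrative
 * placeholder):
 *
 *        static int
 *        example_callback(struct ufsmount *ump, uint64_t *offp,
 *            struct quota2_entry *q2e, uint64_t off, void *v)
 *        {
 *                if (ufs_rw32(q2e->q2e_uid, UFS_MPNEEDSWAP(ump)) == wanted_id)
 *                        return Q2WL_ABORT;    // found it, stop the walk
 *                return 0;                     // keep walking
 *        }
 */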

static int
quota2_walk_list(struct ufsmount *ump, struct buf *hbp, int type,
    uint64_t *offp, int flags, void *a,
    int (*func)(struct ufsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
{
#ifdef FFS_EI
        const int needswap = UFS_MPNEEDSWAP(ump);
#endif
        daddr_t off = ufs_rw64(*offp, needswap);
        struct buf *bp, *obp = hbp;
        int ret = 0, ret2 = 0;
        struct quota2_entry *q2e;
        daddr_t lblkno, blkoff, olblkno = 0;

        KASSERT(mutex_owned(&dqlock));

        while (off != 0) {
                lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
                blkoff = (off & ump->umq2_bmask);
                if (lblkno == 0) {
                        /* in the header block */
                        bp = hbp;
                } else if (lblkno == olblkno) {
                        /* still in the same buf */
                        bp = obp;
                } else {
                        ret = bread(ump->um_quotas[type], lblkno,
                            ump->umq2_bsize,
                            ump->um_cred[type], flags, &bp);
                        if (ret)
                                return ret;
                        if (bp->b_resid != 0) {
                                panic("quota2_walk_list: %s quota file corrupted",
                                    quotatypes[type]);
                        }
                }
                q2e = (void *)((char *)(bp->b_data) + blkoff);
                ret = (*func)(ump, offp, q2e, off, a);
                if (off != ufs_rw64(*offp, needswap)) {
                        /* callback changed parent's pointer, redo */
                        off = ufs_rw64(*offp, needswap);
                        if (bp != hbp && bp != obp)
                                ret2 = bwrite(bp);
                } else {
                        /* parent is now current */
                        if (obp != bp && obp != hbp) {
                                if (flags & B_MODIFY)
                                        ret2 = bwrite(obp);
                                else
                                        brelse(obp, 0);
                        }
                        obp = bp;
                        olblkno = lblkno;
                        offp = &(q2e->q2e_next);
                        off = ufs_rw64(*offp, needswap);
                }
                if (ret)
                        break;
                if (ret2) {
                        ret = ret2;
                        break;
                }
        }
        if (obp != hbp) {
                if (flags & B_MODIFY)
                        ret2 = bwrite(obp);
                else
                        brelse(obp, 0);
        }
        if (ret & Q2WL_ABORT)
                return 0;
        if (ret == 0)
                return ret2;
        return ret;
}

int
quota2_umount(struct mount *mp, int flags)
{
        int i, error;
        struct ufsmount *ump = VFSTOUFS(mp);

        if ((ump->um_flags & UFS_QUOTA2) == 0)
                return 0;

        for (i = 0; i < MAXQUOTAS; i++) {
                if (ump->um_quotas[i] != NULLVP) {
                        error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
                            ump->um_cred[i]);
                        if (error) {
                                printf("quota2_umount failed: close(%p) %d\n",
                                    ump->um_quotas[i], error);
                                return error;
                        }
                }
                ump->um_quotas[i] = NULLVP;
        }
        return 0;
}

static int
quota2_q2ealloc(struct ufsmount *ump, int type, uid_t uid, struct dquot *dq)
{
        int error, error2;
        struct buf *hbp, *bp;
        struct quota2_header *q2h;
        struct quota2_entry *q2e;
        daddr_t offset;
        u_long hash_mask;
        const int needswap = UFS_MPNEEDSWAP(ump);

        KASSERT(mutex_owned(&dq->dq_interlock));
        KASSERT(mutex_owned(&dqlock));
        error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
        if (error)
                return error;
        offset = ufs_rw64(q2h->q2h_free, needswap);
        if (offset == 0) {
                struct vnode *vp = ump->um_quotas[type];
                struct inode *ip = VTOI(vp);
                uint64_t size = ip->i_size;
                /* need to allocate a new disk block */
                error = UFS_BALLOC(vp, size, ump->umq2_bsize,
                    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
                if (error) {
                        brelse(hbp, 0);
                        return error;
                }
                KASSERT((ip->i_size % ump->umq2_bsize) == 0);
                ip->i_size += ump->umq2_bsize;
                DIP_ASSIGN(ip, size, ip->i_size);
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
                uvm_vnp_setsize(vp, ip->i_size);
                quota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
                    needswap);
                error = bwrite(bp);
                error2 = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
                if (error || error2) {
                        brelse(hbp, 0);
                        if (error)
                                return error;
                        return error2;
                }
                offset = ufs_rw64(q2h->q2h_free, needswap);
                KASSERT(offset != 0);
        }
        dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
        dq->dq2_blkoff = (offset & ump->umq2_bmask);
        if (dq->dq2_lblkno == 0) {
                bp = hbp;
                q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
        } else {
                error = getq2e(ump, type, dq->dq2_lblkno,
                    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
                if (error) {
                        brelse(hbp, 0);
                        return error;
                }
        }
        hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
        /* remove from free list */
        q2h->q2h_free = q2e->q2e_next;

        memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
        q2e->q2e_uid = ufs_rw32(uid, needswap);
        /* insert in hash list */
        q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
        q2h->q2h_entries[uid & hash_mask] = ufs_rw64(offset, needswap);
        if (hbp != bp) {
                bwrite(hbp);
        }
        bwrite(bp);
        return 0;
}

static int
getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
    struct quota2_entry **q2ep)
{
        int error;
        int i;
        struct dquot *dq;
        struct ufsmount *ump = ip->i_ump;
        u_int32_t ino_ids[MAXQUOTAS];

        error = getinoquota(ip);
        if (error)
                return error;

        if (alloc) {
                UFS_WAPBL_JLOCK_ASSERT(ump->um_mountp);
        }
        ino_ids[USRQUOTA] = ip->i_uid;
        ino_ids[GRPQUOTA] = ip->i_gid;
        /* first get the interlock for all dquot */
        for (i = 0; i < MAXQUOTAS; i++) {
                dq = ip->i_dquot[i];
                if (dq == NODQUOT)
                        continue;
                mutex_enter(&dq->dq_interlock);
        }
        /* now get the corresponding quota entry */
        for (i = 0; i < MAXQUOTAS; i++) {
                bpp[i] = NULL;
                q2ep[i] = NULL;
                dq = ip->i_dquot[i];
                if (dq == NODQUOT)
                        continue;
                if (__predict_false(ump->um_quotas[i] == NULL)) {
                        /*
                         * quotas have been turned off. This can happen
                         * at umount time.
                         */
                        mutex_exit(&dq->dq_interlock);
                        dqrele(NULLVP, dq);
                        ip->i_dquot[i] = NULL;
                        continue;
                }

                if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
                        if (!alloc) {
                                continue;
                        }
                        /* need to allocate a new on-disk quota entry */
                        mutex_enter(&dqlock);
                        error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
                        mutex_exit(&dqlock);
                        if (error)
                                return error;
                }
                KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
                error = getq2e(ump, i, dq->dq2_lblkno,
                    dq->dq2_blkoff, &bpp[i], &q2ep[i],
                    modify ? B_MODIFY : 0);
                if (error)
                        return error;
        }
        return 0;
}

__inline static int __unused
quota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
{
        return quota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
            q2v->q2v_hardlimit, q2v->q2v_time, now);
}
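
/*
 * quota_check_limit() classifies the proposed change against the soft
 * and hard limits.  quota2_check() below acts on the QL_STATUS() part
 * of the result: QL_S_DENY_HARD and QL_S_DENY_GRACE refuse the
 * allocation with EDQUOT (unless FORCE is set or the caller holds the
 * KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT privilege), while QL_S_ALLOW_SOFT
 * only prints a warning.  Independently of enforcement, the QL_F_CROSS
 * flag makes quota2_check() start the grace period by stamping
 * q2v_time.
 */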

static int
quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
    int flags)
{
        int error;
        struct buf *bp[MAXQUOTAS];
        struct quota2_entry *q2e[MAXQUOTAS];
        struct quota2_val *q2vp;
        struct dquot *dq;
        uint64_t ncurblks;
        struct ufsmount *ump = ip->i_ump;
        struct mount *mp = ump->um_mountp;
        const int needswap = UFS_MPNEEDSWAP(ump);
        int i;

        if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
                return error;
        if (change == 0) {
                for (i = 0; i < MAXQUOTAS; i++) {
                        dq = ip->i_dquot[i];
                        if (dq == NODQUOT)
                                continue;
                        if (bp[i])
                                brelse(bp[i], 0);
                        mutex_exit(&dq->dq_interlock);
                }
                return 0;
        }
        if (change < 0) {
                for (i = 0; i < MAXQUOTAS; i++) {
                        dq = ip->i_dquot[i];
                        if (dq == NODQUOT)
                                continue;
                        if (q2e[i] == NULL) {
                                mutex_exit(&dq->dq_interlock);
                                continue;
                        }
                        q2vp = &q2e[i]->q2e_val[vtype];
                        ncurblks = ufs_rw64(q2vp->q2v_cur, needswap);
                        if (ncurblks < -change)
                                ncurblks = 0;
                        else
                                ncurblks += change;
                        q2vp->q2v_cur = ufs_rw64(ncurblks, needswap);
                        quota2_bwrite(mp, bp[i]);
                        mutex_exit(&dq->dq_interlock);
                }
                return 0;
        }
        /* see if the allocation is allowed */
        for (i = 0; i < MAXQUOTAS; i++) {
                struct quota2_val q2v;
                int ql_stat;
                dq = ip->i_dquot[i];
                if (dq == NODQUOT)
                        continue;
                KASSERT(q2e[i] != NULL);
                quota2_ufs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
                ql_stat = quota2_check_limit(&q2v, change, time_second);

                if ((flags & FORCE) == 0 &&
                    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
                    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
                    KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
                        /* enforce this limit */
                        switch(QL_STATUS(ql_stat)) {
                        case QL_S_DENY_HARD:
                                if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
                                        uprintf("\n%s: write failed, %s %s "
                                            "limit reached\n",
                                            mp->mnt_stat.f_mntonname,
                                            quotatypes[i], limnames[vtype]);
                                        dq->dq_flags |= DQ_WARN(vtype);
                                }
                                error = EDQUOT;
                                break;
                        case QL_S_DENY_GRACE:
                                if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
                                        uprintf("\n%s: write failed, %s %s "
                                            "limit reached\n",
                                            mp->mnt_stat.f_mntonname,
                                            quotatypes[i], limnames[vtype]);
                                        dq->dq_flags |= DQ_WARN(vtype);
                                }
                                error = EDQUOT;
                                break;
                        case QL_S_ALLOW_SOFT:
                                if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
                                        uprintf("\n%s: warning, %s %s "
                                            "quota exceeded\n",
                                            mp->mnt_stat.f_mntonname,
                                            quotatypes[i], limnames[vtype]);
                                        dq->dq_flags |= DQ_WARN(vtype);
                                }
                                break;
                        }
                }
                /*
                 * Always do this; we don't know if the allocation will
                 * succeed or not in the end. If we don't do the allocation,
                 * q2v_time will be ignored anyway.
                 */
                if (ql_stat & QL_F_CROSS) {
                        q2v.q2v_time = time_second + q2v.q2v_grace;
                        quota2_ufs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
                            needswap);
                }
        }

        /* now do the allocation if allowed */
        for (i = 0; i < MAXQUOTAS; i++) {
                dq = ip->i_dquot[i];
                if (dq == NODQUOT)
                        continue;
                KASSERT(q2e[i] != NULL);
                if (error == 0) {
                        q2vp = &q2e[i]->q2e_val[vtype];
                        ncurblks = ufs_rw64(q2vp->q2v_cur, needswap);
                        q2vp->q2v_cur = ufs_rw64(ncurblks + change, needswap);
                        quota2_bwrite(mp, bp[i]);
                } else
                        brelse(bp[i], 0);
                mutex_exit(&dq->dq_interlock);
        }
        return error;
}

int
chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
{
        return quota2_check(ip, QL_BLOCK, change, cred, flags);
}

int
chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
{
        return quota2_check(ip, QL_FILE, change, cred, flags);
}

int
quota2_handle_cmd_put(struct ufsmount *ump, const struct quotakey *key,
    const struct quotaval *val)
{
        int error;
        struct dquot *dq;
        struct quota2_header *q2h;
        struct quota2_entry q2e, *q2ep;
        struct buf *bp;
        const int needswap = UFS_MPNEEDSWAP(ump);

        /* make sure we can index by the fs-independent idtype */
        CTASSERT(QUOTA_IDTYPE_USER == USRQUOTA);
        CTASSERT(QUOTA_IDTYPE_GROUP == GRPQUOTA);

        if (ump->um_quotas[key->qk_idtype] == NULLVP)
                return ENODEV;
        error = UFS_WAPBL_BEGIN(ump->um_mountp);
        if (error)
                return error;

        if (key->qk_id == QUOTA_DEFAULTID) {
                mutex_enter(&dqlock);
                error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
                if (error) {
                        mutex_exit(&dqlock);
                        goto out_wapbl;
                }
                quota2_ufs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
                quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
                quota2_ufs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
                mutex_exit(&dqlock);
                quota2_bwrite(ump->um_mountp, bp);
                goto out_wapbl;
        }

        error = dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
        if (error)
                goto out_wapbl;

        mutex_enter(&dq->dq_interlock);
        if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
                /* need to allocate a new on-disk quota entry */
                mutex_enter(&dqlock);
                error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
                mutex_exit(&dqlock);
                if (error)
                        goto out_il;
        }
        KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
        error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
            dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
        if (error)
                goto out_il;

        quota2_ufs_rwq2e(q2ep, &q2e, needswap);
        quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
        quota2_ufs_rwq2e(&q2e, q2ep, needswap);
        quota2_bwrite(ump->um_mountp, bp);

out_il:
        mutex_exit(&dq->dq_interlock);
        dqrele(NULLVP, dq);
out_wapbl:
        UFS_WAPBL_END(ump->um_mountp);
        return error;
}

struct dq2clear_callback {
        uid_t id;
        struct dquot *dq;
        struct quota2_header *q2h;
};

static int
dq2clear_callback(struct ufsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
    uint64_t off, void *v)
{
        struct dq2clear_callback *c = v;
#ifdef FFS_EI
        const int needswap = UFS_MPNEEDSWAP(ump);
#endif
        uint64_t myoff;

        if (ufs_rw32(q2e->q2e_uid, needswap) == c->id) {
                KASSERT(mutex_owned(&c->dq->dq_interlock));
                c->dq->dq2_lblkno = 0;
                c->dq->dq2_blkoff = 0;
                myoff = *offp;
                /* remove from hash list */
                *offp = q2e->q2e_next;
                /* add to free list */
                q2e->q2e_next = c->q2h->q2h_free;
                c->q2h->q2h_free = myoff;
                return Q2WL_ABORT;
        }
        return 0;
}
int
quota2_handle_cmd_delete(struct ufsmount *ump, const struct quotakey *qk)
{
        int idtype;
        id_t id;
        int objtype;
        int error, i, canfree;
        struct dquot *dq;
        struct quota2_header *q2h;
        struct quota2_entry q2e, *q2ep;
        struct buf *hbp, *bp;
        u_long hash_mask;
        struct dq2clear_callback c;

        idtype = qk->qk_idtype;
        id = qk->qk_id;
        objtype = qk->qk_objtype;

        if (ump->um_quotas[idtype] == NULLVP)
                return ENODEV;
        if (id == QUOTA_DEFAULTID)
                return EOPNOTSUPP;

        /* get the default entry before locking the entry's buffer */
        mutex_enter(&dqlock);
        error = getq2h(ump, idtype, &hbp, &q2h, 0);
        if (error) {
                mutex_exit(&dqlock);
                return error;
        }
        /* we'll copy to another disk entry, so no need to swap */
        memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
        mutex_exit(&dqlock);
        brelse(hbp, 0);

        error = dqget(NULLVP, id, ump, idtype, &dq);
        if (error)
                return error;

        mutex_enter(&dq->dq_interlock);
        if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
                /* already clear, nothing to do */
                error = ENOENT;
                goto out_il;
        }
        error = UFS_WAPBL_BEGIN(ump->um_mountp);
        if (error)
                goto out_dq;

        error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
            &bp, &q2ep, B_MODIFY);
        if (error)
                goto out_wapbl;

        /* make sure we can index by the objtype passed in */
        CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
        CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

        /* clear the requested objtype by copying from the default entry */
        q2ep->q2e_val[objtype].q2v_softlimit =
            q2e.q2e_val[objtype].q2v_softlimit;
        q2ep->q2e_val[objtype].q2v_hardlimit =
            q2e.q2e_val[objtype].q2v_hardlimit;
        q2ep->q2e_val[objtype].q2v_grace =
            q2e.q2e_val[objtype].q2v_grace;
        q2ep->q2e_val[objtype].q2v_time = 0;

        /* if this entry now contains no information, we can free it */
        canfree = 1;
        for (i = 0; i < N_QL; i++) {
                if (q2ep->q2e_val[i].q2v_cur != 0 ||
                    (q2ep->q2e_val[i].q2v_softlimit !=
                     q2e.q2e_val[i].q2v_softlimit) ||
                    (q2ep->q2e_val[i].q2v_hardlimit !=
                     q2e.q2e_val[i].q2v_hardlimit) ||
                    (q2ep->q2e_val[i].q2v_grace !=
                     q2e.q2e_val[i].q2v_grace)) {
                        canfree = 0;
                        break;
                }
                /* note: do not need to check q2v_time */
        }

        if (canfree == 0) {
                quota2_bwrite(ump->um_mountp, bp);
                goto out_wapbl;
        }
        /* we can free it. release bp so we can walk the list */
        brelse(bp, 0);
        mutex_enter(&dqlock);
        error = getq2h(ump, idtype, &hbp, &q2h, 0);
        if (error)
                goto out_dqlock;

        hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
        c.dq = dq;
        c.id = id;
        c.q2h = q2h;
        error = quota2_walk_list(ump, hbp, idtype,
            &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
            dq2clear_callback);

        bwrite(hbp);

out_dqlock:
        mutex_exit(&dqlock);
out_wapbl:
        UFS_WAPBL_END(ump->um_mountp);
out_il:
        mutex_exit(&dq->dq_interlock);
out_dq:
        dqrele(NULLVP, dq);
        return error;
}

static int
quota2_fetch_q2e(struct ufsmount *ump, const struct quotakey *qk,
    struct quota2_entry *ret)
{
        struct dquot *dq;
        int error;
        struct quota2_entry *q2ep;
        struct buf *bp;
        const int needswap = UFS_MPNEEDSWAP(ump);

        error = dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
        if (error)
                return error;

        mutex_enter(&dq->dq_interlock);
        if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
                mutex_exit(&dq->dq_interlock);
                dqrele(NULLVP, dq);
                return ENOENT;
        }
        error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
            &bp, &q2ep, 0);
        if (error) {
                mutex_exit(&dq->dq_interlock);
                dqrele(NULLVP, dq);
                return error;
        }
        quota2_ufs_rwq2e(q2ep, ret, needswap);
        brelse(bp, 0);
        mutex_exit(&dq->dq_interlock);
        dqrele(NULLVP, dq);

        return 0;
}

static int
quota2_fetch_quotaval(struct ufsmount *ump, const struct quotakey *qk,
    struct quotaval *ret)
{
        struct dquot *dq;
        int error;
        struct quota2_entry *q2ep, q2e;
        struct buf *bp;
        const int needswap = UFS_MPNEEDSWAP(ump);
        id_t id2;

        error = dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
        if (error)
                return error;

        mutex_enter(&dq->dq_interlock);
        if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
                mutex_exit(&dq->dq_interlock);
                dqrele(NULLVP, dq);
                return ENOENT;
        }
        error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
            &bp, &q2ep, 0);
        if (error) {
                mutex_exit(&dq->dq_interlock);
                dqrele(NULLVP, dq);
                return error;
        }
        quota2_ufs_rwq2e(q2ep, &q2e, needswap);
        brelse(bp, 0);
        mutex_exit(&dq->dq_interlock);
        dqrele(NULLVP, dq);

        q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
        KASSERT(id2 == qk->qk_id);
        return 0;
}

int
quota2_handle_cmd_get(struct ufsmount *ump, const struct quotakey *qk,
    struct quotaval *qv)
{
        int error;
        struct quota2_header *q2h;
        struct quota2_entry q2e;
        struct buf *bp;
        const int needswap = UFS_MPNEEDSWAP(ump);
        id_t id2;

        /*
         * Make sure the FS-independent codes match the internal ones,
         * so we can use the passed-in objtype without having to
         * convert it explicitly to QL_BLOCK/QL_FILE.
         */
        CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
        CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
        CTASSERT(N_QL == 2);

        if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
                return EINVAL;
        }

        if (ump->um_quotas[qk->qk_idtype] == NULLVP)
                return ENODEV;
        if (qk->qk_id == QUOTA_DEFAULTID) {
                mutex_enter(&dqlock);
                error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
                if (error) {
                        mutex_exit(&dqlock);
                        return error;
                }
                quota2_ufs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
                mutex_exit(&dqlock);
                brelse(bp, 0);
                q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
                    qk->qk_objtype, qv);
                (void)id2;
        } else
                error = quota2_fetch_quotaval(ump, qk, qv);

        return error;
}

/*
 * Cursor structure we use.
 *
 * This will get stored in userland between calls so we must not assume
 * it isn't arbitrarily corrupted.
 */
struct ufsq2_cursor {
        uint32_t q2c_magic;     /* magic number */
        int q2c_hashsize;       /* size of hash table at last go */

        int q2c_users_done;     /* true if we've returned all user data */
        int q2c_groups_done;    /* true if we've returned all group data */
        int q2c_defaults_done;  /* true if we've returned the default values */
        int q2c_hashpos;        /* slot to start at in hash table */
        int q2c_uidpos;         /* number of ids we've handled */
        int q2c_blocks_done;    /* true if we've returned the blocks value */
};

/*
 * State of a single cursorget call, or at least the part of it that
 * needs to be passed around.
 */
struct q2cursor_state {
        /* data return pointers */
        struct quotakey *keys;
        struct quotaval *vals;

        /* key/value counters */
        unsigned maxkeyvals;
        unsigned numkeys;       /* number of keys assigned */

        /* ID to key/value conversion state */
        int skipfirst;          /* if true skip first key/value */
        int skiplast;           /* if true skip last key/value */

        /* ID counters */
        unsigned maxids;        /* maximum number of IDs to handle */
        unsigned numids;        /* number of IDs handled */
};

/*
 * Additional structure for getids callback.
 */
struct q2cursor_getids {
        struct q2cursor_state *state;
        int idtype;
        unsigned skip;          /* number of ids to skip over */
        unsigned new_skip;      /* number of ids to skip over next time */
        unsigned skipped;       /* number skipped so far */
        int stopped;            /* true if we stopped quota_walk_list early */
};

/*
 * Cursor-related functions
 */

/* magic number */
#define Q2C_MAGIC (0xbeebe111)

/* extract cursor from caller form */
#define Q2CURSOR(qkc) ((struct ufsq2_cursor *)&qkc->u.qkc_space[0])

/*
 * Check that a cursor we're handed is something like valid. If
 * someone munges it and it still passes these checks, they'll get
 * partial or odd results back but won't break anything.
 */
static int
q2cursor_check(struct ufsq2_cursor *cursor)
{
        if (cursor->q2c_magic != Q2C_MAGIC) {
                return EINVAL;
        }
        if (cursor->q2c_hashsize < 0) {
                return EINVAL;
        }

        if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
                return EINVAL;
        }
        if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
                return EINVAL;
        }
        if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
                return EINVAL;
        }
        if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
                return EINVAL;
        }
        if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
                return EINVAL;
        }
        return 0;
}

/*
 * Set up the q2cursor state.
 */
static void
q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
    struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
{
        state->keys = keys;
        state->vals = vals;

        state->maxkeyvals = maxkeyvals;
        state->numkeys = 0;

        /*
         * For each ID there are two quotavals to return. If the
         * maximum number of entries to return is odd, we might want
         * to skip the first quotaval of the first ID, or the last
         * quotaval of the last ID, but not both. So the number of IDs
         * we want is (up to) half the number of return slots we have,
         * rounded up.
         */

        state->maxids = (state->maxkeyvals + 1) / 2;
        state->numids = 0;
        if (state->maxkeyvals % 2) {
                if (blocks_done) {
                        state->skipfirst = 1;
                        state->skiplast = 0;
                } else {
                        state->skipfirst = 0;
                        state->skiplast = 1;
                }
        } else {
                state->skipfirst = 0;
                state->skiplast = 0;
        }
}
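
/*
 * A small worked example of the arithmetic above: with maxkeyvals = 5
 * there are 5 key/value slots, so maxids = (5 + 1) / 2 = 3 IDs.  If the
 * previous call already returned the BLOCKS half of the ID we stopped
 * on (blocks_done set), skipfirst = 1: the first ID only contributes
 * its FILES half and the remaining two IDs fill the other four slots.
 * Otherwise skiplast = 1: the first two IDs take four slots and the
 * third ID only gets its BLOCKS half out; its FILES half is returned by
 * the next call.
 */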

/*
 * Choose which idtype we're going to work on. If doing a full
 * iteration, we do users first, then groups, but either might be
 * disabled or marked to skip via cursorsetidtype(), so don't make
 * silly assumptions.
 */
static int
q2cursor_pickidtype(struct ufsq2_cursor *cursor, int *idtype_ret)
{
        if (cursor->q2c_users_done == 0) {
                *idtype_ret = QUOTA_IDTYPE_USER;
        } else if (cursor->q2c_groups_done == 0) {
                *idtype_ret = QUOTA_IDTYPE_GROUP;
        } else {
                return EAGAIN;
        }
        return 0;
}

/*
 * Add an ID to the current state. Sets up either one or two keys to
 * refer to it, depending on whether it's first/last and the setting
 * of skipfirst. (skiplast does not need to be explicitly tested)
 */
static void
q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
{
        KASSERT(state->numids < state->maxids);
        KASSERT(state->numkeys < state->maxkeyvals);

        if (!state->skipfirst || state->numkeys > 0) {
                state->keys[state->numkeys].qk_idtype = idtype;
                state->keys[state->numkeys].qk_id = id;
                state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
                state->numkeys++;
        }
        if (state->numkeys < state->maxkeyvals) {
                state->keys[state->numkeys].qk_idtype = idtype;
                state->keys[state->numkeys].qk_id = id;
                state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
                state->numkeys++;
        } else {
                KASSERT(state->skiplast);
        }
        state->numids++;
}

/*
 * Callback function for getting IDs. Update counting and call addid.
 */
static int
q2cursor_getids_callback(struct ufsmount *ump, uint64_t *offp,
    struct quota2_entry *q2ep, uint64_t off, void *v)
{
        struct q2cursor_getids *gi = v;
        id_t id;
#ifdef FFS_EI
        const int needswap = UFS_MPNEEDSWAP(ump);
#endif

        if (gi->skipped < gi->skip) {
                gi->skipped++;
                return 0;
        }
        id = ufs_rw32(q2ep->q2e_uid, needswap);
        q2cursor_addid(gi->state, gi->idtype, id);
        gi->new_skip++;
        if (gi->state->numids >= gi->state->maxids) {
                /* got enough ids, stop now */
                gi->stopped = 1;
                return Q2WL_ABORT;
        }
        return 0;
}

/*
 * Fill in a batch of quotakeys by scanning one or more hash chains.
 */
static int
q2cursor_getkeys(struct ufsmount *ump, int idtype, struct ufsq2_cursor *cursor,
    struct q2cursor_state *state,
    int *hashsize_ret, struct quota2_entry *default_q2e_ret)
{
        const int needswap = UFS_MPNEEDSWAP(ump);
        struct buf *hbp;
        struct quota2_header *q2h;
        int quota2_hash_size;
        struct q2cursor_getids gi;
        uint64_t offset;
        int error;

        /*
         * Read the header block.
         */

        mutex_enter(&dqlock);
        error = getq2h(ump, idtype, &hbp, &q2h, 0);
        if (error) {
                mutex_exit(&dqlock);
                return error;
        }

        /* if the table size has changed, make the caller start over */
        quota2_hash_size = ufs_rw16(q2h->q2h_hash_size, needswap);
        if (cursor->q2c_hashsize == 0) {
                cursor->q2c_hashsize = quota2_hash_size;
        } else if (cursor->q2c_hashsize != quota2_hash_size) {
                error = EDEADLK;
                goto scanfail;
        }

        /* grab the entry with the default values out of the header */
        quota2_ufs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);

        /* If we haven't done the defaults yet, that goes first. */
        if (cursor->q2c_defaults_done == 0) {
                q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
                /* if we read both halves, mark it done */
                if (state->numids < state->maxids || !state->skiplast) {
                        cursor->q2c_defaults_done = 1;
                }
        }

        gi.state = state;
        gi.idtype = idtype;

        while (state->numids < state->maxids) {
                if (cursor->q2c_hashpos >= quota2_hash_size) {
                        /* nothing more left */
                        break;
                }

                /* scan this hash chain */
                gi.skip = cursor->q2c_uidpos;
                gi.new_skip = gi.skip;
                gi.skipped = 0;
                gi.stopped = 0;
                offset = q2h->q2h_entries[cursor->q2c_hashpos];

                error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
                    q2cursor_getids_callback);
                KASSERT(error != Q2WL_ABORT);
                if (error) {
                        break;
                }
                if (gi.stopped) {
                        /* callback stopped before reading whole chain */
                        cursor->q2c_uidpos = gi.new_skip;
                        /* if we didn't get both halves, back up */
                        if (state->numids == state->maxids && state->skiplast) {
                                KASSERT(cursor->q2c_uidpos > 0);
                                cursor->q2c_uidpos--;
                        }
                } else {
                        /* read whole chain */
                        /* if we got both halves of the last id, advance */
                        if (state->numids < state->maxids || !state->skiplast) {
                                cursor->q2c_uidpos = 0;
                                cursor->q2c_hashpos++;
                        }
                }
        }

scanfail:
        mutex_exit(&dqlock);
        brelse(hbp, 0);
        if (error)
                return error;

        *hashsize_ret = quota2_hash_size;
        return 0;
}

/*
 * Fetch the quotavals for the quotakeys.
 */
static int
q2cursor_getvals(struct ufsmount *ump, struct q2cursor_state *state,
    const struct quota2_entry *default_q2e)
{
        int hasid;
        id_t loadedid, id;
        unsigned pos;
        struct quota2_entry q2e;
        int objtype;
        int error;

        hasid = 0;
        loadedid = 0;
        for (pos = 0; pos < state->numkeys; pos++) {
                id = state->keys[pos].qk_id;
                if (!hasid || id != loadedid) {
                        hasid = 1;
                        loadedid = id;
                        if (id == QUOTA_DEFAULTID) {
                                q2e = *default_q2e;
                        } else {
                                error = quota2_fetch_q2e(ump,
                                    &state->keys[pos],
                                    &q2e);
                                if (error == ENOENT) {
                                        /* something changed - start over */
                                        error = EDEADLK;
                                }
                                if (error) {
                                        return error;
                                }
                        }
                }

                objtype = state->keys[pos].qk_objtype;
                KASSERT(objtype >= 0 && objtype < N_QL);
                q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
        }

        return 0;
}

/*
 * Handle cursorget.
 *
 * We can't just read keys and values directly, because we can't walk
 * the list with dqlock held and grab dq_interlock to read the entries
 * at the same time. So we're going to do two passes: one to figure out
 * which IDs we want and fill in the keys, and then a second to use
 * the keys to fetch the values.
 */
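
/*
 * A rough sketch of the expected calling sequence, as driven from the
 * quotactl layer outside this file (keys/vals/maxreturn/n/atend are
 * illustrative caller-side variables):
 *
 *        quota2_handle_cmd_cursoropen(ump, qkc);
 *        do {
 *                error = quota2_handle_cmd_cursorget(ump, qkc, keys, vals,
 *                    maxreturn, &n);
 *                ...consume n key/value pairs...
 *                quota2_handle_cmd_cursoratend(ump, qkc, &atend);
 *        } while (error == 0 && !atend);
 *        quota2_handle_cmd_cursorclose(ump, qkc);
 *
 * An EDEADLK return from cursorget means the on-disk hash table changed
 * size (or an entry disappeared) mid-iteration; the caller is expected
 * to rewind and start over.
 */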
int
quota2_handle_cmd_cursorget(struct ufsmount *ump, struct quotakcursor *qkc,
    struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
    unsigned *ret)
{
        int error;
        struct ufsq2_cursor *cursor;
        struct ufsq2_cursor newcursor;
        struct q2cursor_state state;
        struct quota2_entry default_q2e;
        int idtype;
        int quota2_hash_size;

        /*
         * Convert and validate the cursor.
         */
        cursor = Q2CURSOR(qkc);
        error = q2cursor_check(cursor);
        if (error) {
                return error;
        }

        /*
         * Make sure our on-disk codes match the values of the
         * FS-independent ones. This avoids the need for explicit
         * conversion (which would be a NOP anyway and thus easily
         * left out or called in the wrong places...)
         */
        CTASSERT(QUOTA_IDTYPE_USER == USRQUOTA);
        CTASSERT(QUOTA_IDTYPE_GROUP == GRPQUOTA);
        CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
        CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

        /*
         * If some of the idtypes aren't configured/enabled, arrange
         * to skip over them.
         */
        if (cursor->q2c_users_done == 0 &&
            ump->um_quotas[USRQUOTA] == NULLVP) {
                cursor->q2c_users_done = 1;
        }
        if (cursor->q2c_groups_done == 0 &&
            ump->um_quotas[GRPQUOTA] == NULLVP) {
                cursor->q2c_groups_done = 1;
        }

        /* Loop over, potentially, both idtypes */
        while (1) {

                /* Choose id type */
                error = q2cursor_pickidtype(cursor, &idtype);
                if (error == EAGAIN) {
                        /* nothing more to do, return 0 */
                        *ret = 0;
                        return 0;
                }
                KASSERT(ump->um_quotas[idtype] != NULLVP);

                /*
                 * Initialize the per-call iteration state. Copy the
                 * cursor state so we can update it in place but back
                 * out on error.
                 */
                q2cursor_initstate(&state, keys, vals, maxreturn,
                    cursor->q2c_blocks_done);
                newcursor = *cursor;

                /* Assign keys */
                error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
                    &quota2_hash_size, &default_q2e);
                if (error) {
                        return error;
                }

                /* Now fill in the values. */
                error = q2cursor_getvals(ump, &state, &default_q2e);
                if (error) {
                        return error;
                }

                /*
                 * Now that we aren't going to fail and lose what we
                 * did so far, we can update the cursor state.
                 */

                if (newcursor.q2c_hashpos >= quota2_hash_size) {
                        if (idtype == QUOTA_IDTYPE_USER)
                                cursor->q2c_users_done = 1;
                        else
                                cursor->q2c_groups_done = 1;

                        /* start over on another id type */
                        cursor->q2c_hashsize = 0;
                        cursor->q2c_defaults_done = 0;
                        cursor->q2c_hashpos = 0;
                        cursor->q2c_uidpos = 0;
                        cursor->q2c_blocks_done = 0;
                } else {
                        *cursor = newcursor;
                        cursor->q2c_blocks_done = state.skiplast;
                }

                /*
                 * If we have something to return, return it.
                 * Otherwise, continue to the other idtype, if any,
                 * and only return zero at end of iteration.
                 */
                if (state.numkeys > 0) {
                        break;
                }
        }

        *ret = state.numkeys;
        return 0;
}

int
quota2_handle_cmd_cursoropen(struct ufsmount *ump, struct quotakcursor *qkc)
{
        struct ufsq2_cursor *cursor;

        CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
        cursor = Q2CURSOR(qkc);

        cursor->q2c_magic = Q2C_MAGIC;
        cursor->q2c_hashsize = 0;

        cursor->q2c_users_done = 0;
        cursor->q2c_groups_done = 0;
        cursor->q2c_defaults_done = 0;
        cursor->q2c_hashpos = 0;
        cursor->q2c_uidpos = 0;
        cursor->q2c_blocks_done = 0;
        return 0;
}

int
quota2_handle_cmd_cursorclose(struct ufsmount *ump, struct quotakcursor *qkc)
{
        struct ufsq2_cursor *cursor;
        int error;

        cursor = Q2CURSOR(qkc);
        error = q2cursor_check(cursor);
        if (error) {
                return error;
        }

        /* nothing to do */

        return 0;
}

int
quota2_handle_cmd_cursorskipidtype(struct ufsmount *ump,
    struct quotakcursor *qkc, int idtype)
{
        struct ufsq2_cursor *cursor;
        int error;

        cursor = Q2CURSOR(qkc);
        error = q2cursor_check(cursor);
        if (error) {
                return error;
        }

        switch (idtype) {
        case QUOTA_IDTYPE_USER:
                cursor->q2c_users_done = 1;
                break;
        case QUOTA_IDTYPE_GROUP:
                cursor->q2c_groups_done = 1;
                break;
        default:
                return EINVAL;
        }

        return 0;
}

int
quota2_handle_cmd_cursoratend(struct ufsmount *ump, struct quotakcursor *qkc,
    int *ret)
{
        struct ufsq2_cursor *cursor;
        int error;

        cursor = Q2CURSOR(qkc);
        error = q2cursor_check(cursor);
        if (error) {
                return error;
        }

        *ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
        return 0;
}

int
quota2_handle_cmd_cursorrewind(struct ufsmount *ump, struct quotakcursor *qkc)
{
        struct ufsq2_cursor *cursor;
        int error;

        cursor = Q2CURSOR(qkc);
        error = q2cursor_check(cursor);
        if (error) {
                return error;
        }

        cursor->q2c_hashsize = 0;

        cursor->q2c_users_done = 0;
        cursor->q2c_groups_done = 0;
        cursor->q2c_defaults_done = 0;
        cursor->q2c_hashpos = 0;
        cursor->q2c_uidpos = 0;
        cursor->q2c_blocks_done = 0;

        return 0;
}

int
q2sync(struct mount *mp)
{
        return 0;
}

struct dq2get_callback {
        uid_t id;
        struct dquot *dq;
};

static int
dq2get_callback(struct ufsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
    uint64_t off, void *v)
{
        struct dq2get_callback *c = v;
        daddr_t lblkno;
        int blkoff;
#ifdef FFS_EI
        const int needswap = UFS_MPNEEDSWAP(ump);
#endif

        if (ufs_rw32(q2e->q2e_uid, needswap) == c->id) {
                KASSERT(mutex_owned(&c->dq->dq_interlock));
                lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
                blkoff = (off & ump->umq2_bmask);
                c->dq->dq2_lblkno = lblkno;
                c->dq->dq2_blkoff = blkoff;
                return Q2WL_ABORT;
        }
        return 0;
}

int
dq2get(struct vnode *dqvp, u_long id, struct ufsmount *ump, int type,
    struct dquot *dq)
{
        struct buf *bp;
        struct quota2_header *q2h;
        int error;
        daddr_t offset;
        u_long hash_mask;
        struct dq2get_callback c = {
                .id = id,
                .dq = dq
        };

        KASSERT(mutex_owned(&dq->dq_interlock));
        mutex_enter(&dqlock);
        error = getq2h(ump, type, &bp, &q2h, 0);
        if (error)
                goto out_mutex;
        /* look for our entry */
        hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
        offset = q2h->q2h_entries[id & hash_mask];
        error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
            dq2get_callback);
        brelse(bp, 0);
out_mutex:
        mutex_exit(&dqlock);
        return error;
}

int
dq2sync(struct vnode *vp, struct dquot *dq)
{
        return 0;
}