1 /* $NetBSD: ulfs_quota2.c,v 1.8 2013/07/28 00:37:07 dholland Exp $ */
2 /* from NetBSD: ufs_quota2.c,v 1.35 2012/09/27 07:47:56 bouyer Exp */
3
4 /*-
5 * Copyright (c) 2010 Manuel Bouyer
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.8 2013/07/28 00:37:07 dholland Exp $");
32
33 #include <sys/buf.h>
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/systm.h>
37 #include <sys/namei.h>
38 #include <sys/file.h>
39 #include <sys/proc.h>
40 #include <sys/vnode.h>
41 #include <sys/mount.h>
42 #include <sys/fstrans.h>
43 #include <sys/kauth.h>
44 #include <sys/wapbl.h>
45 #include <sys/quota.h>
46 #include <sys/quotactl.h>
47
48 #include <ufs/lfs/lfs_extern.h>
49
50 #include <ufs/lfs/ulfs_quota2.h>
51 #include <ufs/lfs/ulfs_inode.h>
52 #include <ufs/lfs/ulfsmount.h>
53 #include <ufs/lfs/ulfs_bswap.h>
54 #include <ufs/lfs/ulfs_extern.h>
55 #include <ufs/lfs/ulfs_quota.h>
56
57 /*
58 * LOCKING:
59 * Data in the entries are protected by the associated struct dquot's
60 * dq_interlock (this means we can't read or change a quota entry without
61 * grabbing a dquot for it).
62 * The header and lists (including pointers in the data entries, and q2e_uid)
63 * are protected by the global lfs_dqlock.
64 * The locking order is dq_interlock -> lfs_dqlock.
65 */
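/*
 * On-disk layout, as used by the code below (informal sketch):
 * block 0 of each quota vnode holds the quota2_header: a magic number,
 * the quota type, a default entry (q2h_defentry), a free-list head
 * (q2h_free) and a hash table of chain heads (q2h_entries[]).  Entries
 * are linked through q2e_next; every link is a byte offset into the
 * quota file, decomposed as
 *
 *	lblkno = offset >> mnt_fs_bshift;   (file block holding the entry)
 *	blkoff = offset & umq2_bmask;       (byte offset within that block)
 *
 * An offset of 0 terminates a chain, since the header occupies the
 * start of block 0.
 */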
66
67 static int quota2_bwrite(struct mount *, struct buf *);
68 static int getinoquota2(struct inode *, bool, bool, struct buf **,
69 struct quota2_entry **);
70 static int getq2h(struct ulfsmount *, int, struct buf **,
71 struct quota2_header **, int);
72 static int getq2e(struct ulfsmount *, int, daddr_t, int, struct buf **,
73 struct quota2_entry **, int);
74 static int quota2_walk_list(struct ulfsmount *, struct buf *, int,
75 uint64_t *, int, void *,
76 int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *,
77 uint64_t, void *));
78
79 static const char *limnames[] = INITQLNAMES;
80
81 static void
82 quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
83 struct quota2_entry *q2e)
84 {
85 /* make sure we can index q2e_val[] by the fs-independent objtype */
86 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
87 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
88
89 q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
90 q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
91 q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
92 }
93
94 /*
95 * Convert internal representation to FS-independent representation.
96 * (Note that while the two types are currently identical, the
97 * internal representation is an on-disk struct and the FS-independent
98 * representation is not, and they might diverge in the future.)
99 */
100 static void
101 q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
102 {
103 qv->qv_softlimit = q2v->q2v_softlimit;
104 qv->qv_hardlimit = q2v->q2v_hardlimit;
105 qv->qv_usage = q2v->q2v_cur;
106 qv->qv_expiretime = q2v->q2v_time;
107 qv->qv_grace = q2v->q2v_grace;
108 }
109
110 /*
111 * Convert a quota2entry and default-flag to the FS-independent
112 * representation.
113 */
114 static void
115 q2e_to_quotaval(struct quota2_entry *q2e, int def,
116 id_t *id, int objtype, struct quotaval *ret)
117 {
118 if (def) {
119 *id = QUOTA_DEFAULTID;
120 } else {
121 *id = q2e->q2e_uid;
122 }
123
124 KASSERT(objtype >= 0 && objtype < N_QL);
125 q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
126 }
127
128
129 static int
130 quota2_bwrite(struct mount *mp, struct buf *bp)
131 {
132 if (mp->mnt_flag & MNT_SYNCHRONOUS)
133 return bwrite(bp);
134 else {
135 bdwrite(bp);
136 return 0;
137 }
138 }
139
140 static int
141 getq2h(struct ulfsmount *ump, int type,
142 struct buf **bpp, struct quota2_header **q2hp, int flags)
143 {
144 #ifdef LFS_EI
145 const int needswap = ULFS_MPNEEDSWAP(ump);
146 #endif
147 int error;
148 struct buf *bp;
149 struct quota2_header *q2h;
150
151 KASSERT(mutex_owned(&lfs_dqlock));
152 error = bread(ump->um_quotas[type], 0, ump->umq2_bsize,
153 ump->um_cred[type], flags, &bp);
154 if (error)
155 return error;
156 if (bp->b_resid != 0)
157 panic("dq2get: %s quota file truncated", lfs_quotatypes[type]);
158
159 q2h = (void *)bp->b_data;
160 if (ulfs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
161 q2h->q2h_type != type)
162 panic("dq2get: corrupted %s quota header", lfs_quotatypes[type]);
163 *bpp = bp;
164 *q2hp = q2h;
165 return 0;
166 }
167
168 static int
169 getq2e(struct ulfsmount *ump, int type, daddr_t lblkno, int blkoffset,
170 struct buf **bpp, struct quota2_entry **q2ep, int flags)
171 {
172 int error;
173 struct buf *bp;
174
175 if (blkoffset & (sizeof(uint64_t) - 1)) {
176 panic("dq2get: %s quota file corrupted",
177 lfs_quotatypes[type]);
178 }
179 error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize,
180 ump->um_cred[type], flags, &bp);
181 if (error)
182 return error;
183 if (bp->b_resid != 0) {
184 panic("dq2get: %s quota file corrupted",
185 lfs_quotatypes[type]);
186 }
187 *q2ep = (void *)((char *)bp->b_data + blkoffset);
188 *bpp = bp;
189 return 0;
190 }
191
192 /* walk a quota entry list, calling the callback for each entry */
193 #define Q2WL_ABORT 0x10000000
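/*
 * Protocol (as implemented below): the callback is handed a pointer to
 * the link field that led to the entry (offp), the entry itself, its
 * byte offset and an opaque argument.  It may rewrite *offp to unlink
 * the entry, in which case the walk re-reads the link and continues
 * from there; returning Q2WL_ABORT stops the walk early and is
 * reported to the caller as success.
 */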
194
195 static int
196 quota2_walk_list(struct ulfsmount *ump, struct buf *hbp, int type,
197 uint64_t *offp, int flags, void *a,
198 int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
199 {
200 #ifdef LFS_EI
201 const int needswap = ULFS_MPNEEDSWAP(ump);
202 #endif
203 daddr_t off = ulfs_rw64(*offp, needswap);
204 struct buf *bp, *obp = hbp;
205 int ret = 0, ret2 = 0;
206 struct quota2_entry *q2e;
207 daddr_t lblkno, blkoff, olblkno = 0;
208
209 	KASSERT(mutex_owned(&lfs_dqlock));
210
211 while (off != 0) {
212 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
213 blkoff = (off & ump->umq2_bmask);
214 if (lblkno == 0) {
215 /* in the header block */
216 bp = hbp;
217 } else if (lblkno == olblkno) {
218 /* still in the same buf */
219 bp = obp;
220 } else {
221 ret = bread(ump->um_quotas[type], lblkno,
222 ump->umq2_bsize,
223 ump->um_cred[type], flags, &bp);
224 if (ret)
225 return ret;
226 if (bp->b_resid != 0) {
227 panic("quota2_walk_list: %s quota file corrupted",
228 lfs_quotatypes[type]);
229 }
230 }
231 q2e = (void *)((char *)(bp->b_data) + blkoff);
232 ret = (*func)(ump, offp, q2e, off, a);
233 if (off != ulfs_rw64(*offp, needswap)) {
234 /* callback changed parent's pointer, redo */
235 off = ulfs_rw64(*offp, needswap);
236 if (bp != hbp && bp != obp)
237 ret2 = bwrite(bp);
238 } else {
239 /* parent is now current */
240 if (obp != bp && obp != hbp) {
241 if (flags & B_MODIFY)
242 ret2 = bwrite(obp);
243 else
244 brelse(obp, 0);
245 }
246 obp = bp;
247 olblkno = lblkno;
248 offp = &(q2e->q2e_next);
249 off = ulfs_rw64(*offp, needswap);
250 }
251 if (ret)
252 break;
253 if (ret2) {
254 ret = ret2;
255 break;
256 }
257 }
258 if (obp != hbp) {
259 if (flags & B_MODIFY)
260 ret2 = bwrite(obp);
261 else
262 brelse(obp, 0);
263 }
264 if (ret & Q2WL_ABORT)
265 return 0;
266 if (ret == 0)
267 return ret2;
268 return ret;
269 }
270
271 int
272 lfsquota2_umount(struct mount *mp, int flags)
273 {
274 int i, error;
275 struct ulfsmount *ump = VFSTOULFS(mp);
276
277 if ((ump->um_flags & ULFS_QUOTA2) == 0)
278 return 0;
279
280 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
281 if (ump->um_quotas[i] != NULLVP) {
282 error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
283 ump->um_cred[i]);
284 if (error) {
285 printf("quota2_umount failed: close(%p) %d\n",
286 ump->um_quotas[i], error);
287 return error;
288 }
289 }
290 ump->um_quotas[i] = NULLVP;
291 }
292 return 0;
293 }
294
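/*
 * Allocate an on-disk quota entry for (type, uid): take the first
 * entry off the header's free list, extending the quota file by one
 * filesystem block worth of fresh free entries if the list is empty,
 * then initialize the new entry from the default entry and insert it
 * at the head of uid's hash chain.  Called with both dq_interlock and
 * lfs_dqlock held.
 */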
295 static int
296 quota2_q2ealloc(struct ulfsmount *ump, int type, uid_t uid, struct dquot *dq)
297 {
298 int error, error2;
299 struct buf *hbp, *bp;
300 struct quota2_header *q2h;
301 struct quota2_entry *q2e;
302 daddr_t offset;
303 u_long hash_mask;
304 const int needswap = ULFS_MPNEEDSWAP(ump);
305
306 KASSERT(mutex_owned(&dq->dq_interlock));
307 KASSERT(mutex_owned(&lfs_dqlock));
308 error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
309 if (error)
310 return error;
311 offset = ulfs_rw64(q2h->q2h_free, needswap);
312 if (offset == 0) {
313 struct vnode *vp = ump->um_quotas[type];
314 struct inode *ip = VTOI(vp);
315 uint64_t size = ip->i_size;
316 /* need to allocate a new disk block */
317 error = lfs_balloc(vp, size, ump->umq2_bsize,
318 ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
319 if (error) {
320 brelse(hbp, 0);
321 return error;
322 }
323 KASSERT((ip->i_size % ump->umq2_bsize) == 0);
324 ip->i_size += ump->umq2_bsize;
325 DIP_ASSIGN(ip, size, ip->i_size);
326 ip->i_flag |= IN_CHANGE | IN_UPDATE;
327 uvm_vnp_setsize(vp, ip->i_size);
328 lfsquota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
329 needswap);
330 error = bwrite(bp);
331 error2 = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
332 if (error || error2) {
333 brelse(hbp, 0);
334 if (error)
335 return error;
336 return error2;
337 }
338 offset = ulfs_rw64(q2h->q2h_free, needswap);
339 KASSERT(offset != 0);
340 }
341 dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
342 dq->dq2_blkoff = (offset & ump->umq2_bmask);
343 if (dq->dq2_lblkno == 0) {
344 bp = hbp;
345 q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
346 } else {
347 error = getq2e(ump, type, dq->dq2_lblkno,
348 dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
349 if (error) {
350 brelse(hbp, 0);
351 return error;
352 }
353 }
354 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
355 /* remove from free list */
356 q2h->q2h_free = q2e->q2e_next;
357
358 memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
359 q2e->q2e_uid = ulfs_rw32(uid, needswap);
360 /* insert in hash list */
361 q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
362 q2h->q2h_entries[uid & hash_mask] = ulfs_rw64(offset, needswap);
363 if (hbp != bp) {
364 bwrite(hbp);
365 }
366 bwrite(bp);
367 return 0;
368 }
369
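/*
 * Look up the on-disk quota entries for an inode's user and group ids.
 * Takes dq_interlock on each dquot (the interlocks are left held for
 * the caller to drop) and returns, per quota type, the buffer and a
 * pointer into it for the entry; if "alloc" is set, missing on-disk
 * entries are created first.
 */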
370 static int
371 getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
372 struct quota2_entry **q2ep)
373 {
374 int error;
375 int i;
376 struct dquot *dq;
377 struct ulfsmount *ump = ip->i_ump;
378 u_int32_t ino_ids[ULFS_MAXQUOTAS];
379
380 error = lfs_getinoquota(ip);
381 if (error)
382 return error;
383
384 ino_ids[ULFS_USRQUOTA] = ip->i_uid;
385 ino_ids[ULFS_GRPQUOTA] = ip->i_gid;
386 /* first get the interlock for all dquot */
387 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
388 dq = ip->i_dquot[i];
389 if (dq == NODQUOT)
390 continue;
391 mutex_enter(&dq->dq_interlock);
392 }
393 /* now get the corresponding quota entry */
394 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
395 bpp[i] = NULL;
396 q2ep[i] = NULL;
397 dq = ip->i_dquot[i];
398 if (dq == NODQUOT)
399 continue;
400 if (__predict_false(ump->um_quotas[i] == NULL)) {
401 /*
402 * quotas have been turned off. This can happen
403 * at umount time.
404 */
405 mutex_exit(&dq->dq_interlock);
406 lfs_dqrele(NULLVP, dq);
407 ip->i_dquot[i] = NULL;
408 continue;
409 }
410
411 if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
412 if (!alloc) {
413 continue;
414 }
415 /* need to allocate a new on-disk quota entry */
416 mutex_enter(&lfs_dqlock);
417 error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
418 mutex_exit(&lfs_dqlock);
419 if (error)
420 return error;
421 }
422 KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
423 error = getq2e(ump, i, dq->dq2_lblkno,
424 dq->dq2_blkoff, &bpp[i], &q2ep[i],
425 modify ? B_MODIFY : 0);
426 if (error)
427 return error;
428 }
429 return 0;
430 }
431
432 __inline static int __unused
433 lfsquota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
434 {
435 return lfsquota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
436 q2v->q2v_hardlimit, q2v->q2v_time, now);
437 }
438
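/*
 * Charge or credit "change" units of "vtype" (blocks or files) against
 * the inode's quotas.  change == 0 just releases what getinoquota2()
 * grabbed; change < 0 credits usage, clamping it at zero; change > 0
 * checks the limits (unless the caller may bypass them), arms the
 * grace timer whenever a soft limit would be crossed, and applies the
 * change only if nothing denied it.
 */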
439 static int
440 quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
441 int flags)
442 {
443 int error;
444 struct buf *bp[ULFS_MAXQUOTAS];
445 struct quota2_entry *q2e[ULFS_MAXQUOTAS];
446 struct quota2_val *q2vp;
447 struct dquot *dq;
448 uint64_t ncurblks;
449 struct ulfsmount *ump = ip->i_ump;
450 struct mount *mp = ump->um_mountp;
451 const int needswap = ULFS_MPNEEDSWAP(ump);
452 int i;
453
454 if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
455 return error;
456 if (change == 0) {
457 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
458 dq = ip->i_dquot[i];
459 if (dq == NODQUOT)
460 continue;
461 if (bp[i])
462 brelse(bp[i], 0);
463 mutex_exit(&dq->dq_interlock);
464 }
465 return 0;
466 }
467 if (change < 0) {
468 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
469 dq = ip->i_dquot[i];
470 if (dq == NODQUOT)
471 continue;
472 if (q2e[i] == NULL) {
473 mutex_exit(&dq->dq_interlock);
474 continue;
475 }
476 q2vp = &q2e[i]->q2e_val[vtype];
477 ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
478 if (ncurblks < -change)
479 ncurblks = 0;
480 else
481 ncurblks += change;
482 q2vp->q2v_cur = ulfs_rw64(ncurblks, needswap);
483 quota2_bwrite(mp, bp[i]);
484 mutex_exit(&dq->dq_interlock);
485 }
486 return 0;
487 }
488 /* see if the allocation is allowed */
489 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
490 struct quota2_val q2v;
491 int ql_stat;
492 dq = ip->i_dquot[i];
493 if (dq == NODQUOT)
494 continue;
495 KASSERT(q2e[i] != NULL);
496 lfsquota2_ulfs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
497 ql_stat = lfsquota2_check_limit(&q2v, change, time_second);
498
499 if ((flags & FORCE) == 0 &&
500 kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
501 KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
502 KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
503 /* enforce this limit */
504 switch(QL_STATUS(ql_stat)) {
505 case QL_S_DENY_HARD:
506 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
507 uprintf("\n%s: write failed, %s %s "
508 "limit reached\n",
509 mp->mnt_stat.f_mntonname,
510 lfs_quotatypes[i], limnames[vtype]);
511 dq->dq_flags |= DQ_WARN(vtype);
512 }
513 error = EDQUOT;
514 break;
515 case QL_S_DENY_GRACE:
516 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
517 uprintf("\n%s: write failed, %s %s "
518 "limit reached\n",
519 mp->mnt_stat.f_mntonname,
520 lfs_quotatypes[i], limnames[vtype]);
521 dq->dq_flags |= DQ_WARN(vtype);
522 }
523 error = EDQUOT;
524 break;
525 case QL_S_ALLOW_SOFT:
526 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
527 uprintf("\n%s: warning, %s %s "
528 "quota exceeded\n",
529 mp->mnt_stat.f_mntonname,
530 lfs_quotatypes[i], limnames[vtype]);
531 dq->dq_flags |= DQ_WARN(vtype);
532 }
533 break;
534 }
535 }
536 /*
537 * always do this; we don't know if the allocation will
538 * succeed or not in the end. If we don't do the allocation,
539 * q2v_time will be ignored anyway.
540 */
541 if (ql_stat & QL_F_CROSS) {
542 q2v.q2v_time = time_second + q2v.q2v_grace;
543 lfsquota2_ulfs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
544 needswap);
545 }
546 }
547
548 /* now do the allocation if allowed */
549 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
550 dq = ip->i_dquot[i];
551 if (dq == NODQUOT)
552 continue;
553 KASSERT(q2e[i] != NULL);
554 if (error == 0) {
555 q2vp = &q2e[i]->q2e_val[vtype];
556 ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
557 q2vp->q2v_cur = ulfs_rw64(ncurblks + change, needswap);
558 quota2_bwrite(mp, bp[i]);
559 } else
560 brelse(bp[i], 0);
561 mutex_exit(&dq->dq_interlock);
562 }
563 return error;
564 }
565
566 int
567 lfs_chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
568 {
569 return quota2_check(ip, QL_BLOCK, change, cred, flags);
570 }
571
572 int
573 lfs_chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
574 {
575 return quota2_check(ip, QL_FILE, change, cred, flags);
576 }
577
578 int
579 lfsquota2_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
580 const struct quotaval *val)
581 {
582 int error;
583 struct dquot *dq;
584 struct quota2_header *q2h;
585 struct quota2_entry q2e, *q2ep;
586 struct buf *bp;
587 const int needswap = ULFS_MPNEEDSWAP(ump);
588
589 /* make sure we can index by the fs-independent idtype */
590 CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
591 CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
592
593 if (ump->um_quotas[key->qk_idtype] == NULLVP)
594 return ENODEV;
595
596 if (key->qk_id == QUOTA_DEFAULTID) {
597 mutex_enter(&lfs_dqlock);
598 error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
599 if (error) {
600 mutex_exit(&lfs_dqlock);
601 goto out_wapbl;
602 }
603 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
604 quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
605 lfsquota2_ulfs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
606 mutex_exit(&lfs_dqlock);
607 quota2_bwrite(ump->um_mountp, bp);
608 goto out_wapbl;
609 }
610
611 error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
612 if (error)
613 goto out_wapbl;
614
615 mutex_enter(&dq->dq_interlock);
616 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
617 /* need to allocate a new on-disk quota entry */
618 mutex_enter(&lfs_dqlock);
619 error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
620 mutex_exit(&lfs_dqlock);
621 if (error)
622 goto out_il;
623 }
624 KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
625 error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
626 dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
627 if (error)
628 goto out_il;
629
630 lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
631 quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
632 lfsquota2_ulfs_rwq2e(&q2e, q2ep, needswap);
633 quota2_bwrite(ump->um_mountp, bp);
634
635 out_il:
636 mutex_exit(&dq->dq_interlock);
637 lfs_dqrele(NULLVP, dq);
638 out_wapbl:
639 return error;
640 }
641
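/*
 * Deleting a key resets the selected objtype's limits to the defaults;
 * if the entry then carries no information beyond the defaults (and no
 * usage), it is unlinked from its hash chain and put back on the free
 * list by dq2clear_callback below.
 */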
642 struct dq2clear_callback {
643 uid_t id;
644 struct dquot *dq;
645 struct quota2_header *q2h;
646 };
647
648 static int
649 dq2clear_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
650 uint64_t off, void *v)
651 {
652 struct dq2clear_callback *c = v;
653 #ifdef LFS_EI
654 const int needswap = ULFS_MPNEEDSWAP(ump);
655 #endif
656 uint64_t myoff;
657
658 if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
659 KASSERT(mutex_owned(&c->dq->dq_interlock));
660 c->dq->dq2_lblkno = 0;
661 c->dq->dq2_blkoff = 0;
662 myoff = *offp;
663 /* remove from hash list */
664 *offp = q2e->q2e_next;
665 /* add to free list */
666 q2e->q2e_next = c->q2h->q2h_free;
667 c->q2h->q2h_free = myoff;
668 return Q2WL_ABORT;
669 }
670 return 0;
671 }
672 int
673 lfsquota2_handle_cmd_delete(struct ulfsmount *ump, const struct quotakey *qk)
674 {
675 int idtype;
676 id_t id;
677 int objtype;
678 int error, i, canfree;
679 struct dquot *dq;
680 struct quota2_header *q2h;
681 struct quota2_entry q2e, *q2ep;
682 struct buf *hbp, *bp;
683 u_long hash_mask;
684 struct dq2clear_callback c;
685
686 idtype = qk->qk_idtype;
687 id = qk->qk_id;
688 objtype = qk->qk_objtype;
689
690 if (ump->um_quotas[idtype] == NULLVP)
691 return ENODEV;
692 if (id == QUOTA_DEFAULTID)
693 return EOPNOTSUPP;
694
695 /* get the default entry before locking the entry's buffer */
696 mutex_enter(&lfs_dqlock);
697 error = getq2h(ump, idtype, &hbp, &q2h, 0);
698 if (error) {
699 mutex_exit(&lfs_dqlock);
700 return error;
701 }
702 /* we'll copy to another disk entry, so no need to swap */
703 memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
704 mutex_exit(&lfs_dqlock);
705 brelse(hbp, 0);
706
707 error = lfs_dqget(NULLVP, id, ump, idtype, &dq);
708 if (error)
709 return error;
710
711 mutex_enter(&dq->dq_interlock);
712 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
713 /* already clear, nothing to do */
714 error = ENOENT;
715 goto out_il;
716 }
717
718 error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
719 &bp, &q2ep, B_MODIFY);
720 if (error)
721 goto out_wapbl;
722
723 /* make sure we can index by the objtype passed in */
724 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
725 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
726
727 /* clear the requested objtype by copying from the default entry */
728 q2ep->q2e_val[objtype].q2v_softlimit =
729 q2e.q2e_val[objtype].q2v_softlimit;
730 q2ep->q2e_val[objtype].q2v_hardlimit =
731 q2e.q2e_val[objtype].q2v_hardlimit;
732 q2ep->q2e_val[objtype].q2v_grace =
733 q2e.q2e_val[objtype].q2v_grace;
734 q2ep->q2e_val[objtype].q2v_time = 0;
735
736 /* if this entry now contains no information, we can free it */
737 canfree = 1;
738 for (i = 0; i < N_QL; i++) {
739 if (q2ep->q2e_val[i].q2v_cur != 0 ||
740 (q2ep->q2e_val[i].q2v_softlimit !=
741 q2e.q2e_val[i].q2v_softlimit) ||
742 (q2ep->q2e_val[i].q2v_hardlimit !=
743 q2e.q2e_val[i].q2v_hardlimit) ||
744 (q2ep->q2e_val[i].q2v_grace !=
745 q2e.q2e_val[i].q2v_grace)) {
746 canfree = 0;
747 break;
748 }
749 /* note: do not need to check q2v_time */
750 }
751
752 if (canfree == 0) {
753 quota2_bwrite(ump->um_mountp, bp);
754 goto out_wapbl;
755 }
756 /* we can free it. release bp so we can walk the list */
757 brelse(bp, 0);
758 mutex_enter(&lfs_dqlock);
759 error = getq2h(ump, idtype, &hbp, &q2h, 0);
760 if (error)
761 goto out_dqlock;
762
763 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
764 c.dq = dq;
765 c.id = id;
766 c.q2h = q2h;
767 error = quota2_walk_list(ump, hbp, idtype,
768 &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
769 dq2clear_callback);
770
771 bwrite(hbp);
772
773 out_dqlock:
774 mutex_exit(&lfs_dqlock);
775 out_wapbl:
776 out_il:
777 mutex_exit(&dq->dq_interlock);
778 lfs_dqrele(NULLVP, dq);
779 return error;
780 }
781
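/*
 * Read one id's on-disk entry into *ret while holding its dq_interlock.
 * ENOENT means the id simply has no on-disk entry yet (entries are
 * created lazily by quota2_q2ealloc()).
 */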
782 static int
783 quota2_fetch_q2e(struct ulfsmount *ump, const struct quotakey *qk,
784 struct quota2_entry *ret)
785 {
786 struct dquot *dq;
787 int error;
788 struct quota2_entry *q2ep;
789 struct buf *bp;
790 const int needswap = ULFS_MPNEEDSWAP(ump);
791
792 error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
793 if (error)
794 return error;
795
796 mutex_enter(&dq->dq_interlock);
797 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
798 mutex_exit(&dq->dq_interlock);
799 lfs_dqrele(NULLVP, dq);
800 return ENOENT;
801 }
802 error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
803 &bp, &q2ep, 0);
804 if (error) {
805 mutex_exit(&dq->dq_interlock);
806 lfs_dqrele(NULLVP, dq);
807 return error;
808 }
809 lfsquota2_ulfs_rwq2e(q2ep, ret, needswap);
810 brelse(bp, 0);
811 mutex_exit(&dq->dq_interlock);
812 lfs_dqrele(NULLVP, dq);
813
814 return 0;
815 }
816
817 static int
818 quota2_fetch_quotaval(struct ulfsmount *ump, const struct quotakey *qk,
819 struct quotaval *ret)
820 {
821 struct dquot *dq;
822 int error;
823 struct quota2_entry *q2ep, q2e;
824 struct buf *bp;
825 const int needswap = ULFS_MPNEEDSWAP(ump);
826 id_t id2;
827
828 error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
829 if (error)
830 return error;
831
832 mutex_enter(&dq->dq_interlock);
833 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
834 mutex_exit(&dq->dq_interlock);
835 lfs_dqrele(NULLVP, dq);
836 return ENOENT;
837 }
838 error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
839 &bp, &q2ep, 0);
840 if (error) {
841 mutex_exit(&dq->dq_interlock);
842 lfs_dqrele(NULLVP, dq);
843 return error;
844 }
845 lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
846 brelse(bp, 0);
847 mutex_exit(&dq->dq_interlock);
848 lfs_dqrele(NULLVP, dq);
849
850 q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
851 KASSERT(id2 == qk->qk_id);
852 return 0;
853 }
854
855 int
856 lfsquota2_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
857 struct quotaval *qv)
858 {
859 int error;
860 struct quota2_header *q2h;
861 struct quota2_entry q2e;
862 struct buf *bp;
863 const int needswap = ULFS_MPNEEDSWAP(ump);
864 id_t id2;
865
866 /*
867 * Make sure the FS-independent codes match the internal ones,
868 * so we can use the passed-in objtype without having to
869 * convert it explicitly to QL_BLOCK/QL_FILE.
870 */
871 CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
872 CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
873 CTASSERT(N_QL == 2);
874
875 if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
876 return EINVAL;
877 }
878
879 if (ump->um_quotas[qk->qk_idtype] == NULLVP)
880 return ENODEV;
881 if (qk->qk_id == QUOTA_DEFAULTID) {
882 mutex_enter(&lfs_dqlock);
883 error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
884 if (error) {
885 mutex_exit(&lfs_dqlock);
886 return error;
887 }
888 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
889 mutex_exit(&lfs_dqlock);
890 brelse(bp, 0);
891 q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
892 qk->qk_objtype, qv);
893 (void)id2;
894 } else
895 error = quota2_fetch_quotaval(ump, qk, qv);
896
897 return error;
898 }
899
900 /*
901 * Cursor structure we use.
902 *
903 * This will get stored in userland between calls so we must not assume
904 * it isn't arbitrarily corrupted.
905 */
906 struct ulfsq2_cursor {
907 uint32_t q2c_magic; /* magic number */
908 int q2c_hashsize; /* size of hash table at last go */
909
910 int q2c_users_done; /* true if we've returned all user data */
911 int q2c_groups_done; /* true if we've returned all group data */
912 int q2c_defaults_done; /* true if we've returned the default values */
913 int q2c_hashpos; /* slot to start at in hash table */
914 int q2c_uidpos; /* number of ids we've handled */
915 int q2c_blocks_done; /* true if we've returned the blocks value */
916 };
917
918 /*
919 * State of a single cursorget call, or at least the part of it that
920 * needs to be passed around.
921 */
922 struct q2cursor_state {
923 /* data return pointers */
924 struct quotakey *keys;
925 struct quotaval *vals;
926
927 /* key/value counters */
928 unsigned maxkeyvals;
929 unsigned numkeys; /* number of keys assigned */
930
931 /* ID to key/value conversion state */
932 int skipfirst; /* if true skip first key/value */
933 int skiplast; /* if true skip last key/value */
934
935 /* ID counters */
936 unsigned maxids; /* maximum number of IDs to handle */
937 unsigned numids; /* number of IDs handled */
938 };
939
940 /*
941 * Additional structure for getids callback.
942 */
943 struct q2cursor_getids {
944 struct q2cursor_state *state;
945 int idtype;
946 unsigned skip; /* number of ids to skip over */
947 unsigned new_skip; /* number of ids to skip over next time */
948 unsigned skipped; /* number skipped so far */
949 int stopped; /* true if we stopped quota2_walk_list early */
950 };
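/*
 * skip/new_skip/skipped let a chain scan resume across calls: "skip"
 * is how many entries at the head of the current chain were already
 * returned by earlier calls and must be passed over, "skipped" counts
 * how many have been passed over so far, "new_skip" records how far
 * this call got, and "stopped" is set when the key buffer fills up
 * before the end of the chain.
 */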
951
952 /*
953 * Cursor-related functions
954 */
955
956 /* magic number */
957 #define Q2C_MAGIC (0xbeebe111)
958
959 /* extract cursor from caller form */
960 #define Q2CURSOR(qkc) ((struct ulfsq2_cursor *)&qkc->u.qkc_space[0])
961
962 /*
963 * Check that a cursor we're handed is at least superficially valid. If
964 * someone munges it and it still passes these checks, they'll get
965 * partial or odd results back but won't break anything.
966 */
967 static int
968 q2cursor_check(struct ulfsq2_cursor *cursor)
969 {
970 if (cursor->q2c_magic != Q2C_MAGIC) {
971 return EINVAL;
972 }
973 if (cursor->q2c_hashsize < 0) {
974 return EINVAL;
975 }
976
977 if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
978 return EINVAL;
979 }
980 if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
981 return EINVAL;
982 }
983 if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
984 return EINVAL;
985 }
986 if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
987 return EINVAL;
988 }
989 if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
990 return EINVAL;
991 }
992 return 0;
993 }
994
995 /*
996 * Set up the q2cursor state.
997 */
998 static void
999 q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
1000 struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
1001 {
1002 state->keys = keys;
1003 state->vals = vals;
1004
1005 state->maxkeyvals = maxkeyvals;
1006 state->numkeys = 0;
1007
1008 /*
1009 * For each ID there are two quotavals to return. If the
1010 * maximum number of entries to return is odd, we might want
1011 * to skip the first quotaval of the first ID, or the last
1012 * quotaval of the last ID, but not both. So the number of IDs
1013 * we want is (up to) half the number of return slots we have,
1014 * rounded up.
1015 */
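/*
 * For example (hypothetical numbers): maxkeyvals == 5 gives
 * maxids == 3.  q2cursor_addid() emits a BLOCKS key and then a FILES
 * key per id, so when blocks_done is set the BLOCKS half of the first
 * id was already delivered by the previous call and is skipped now;
 * otherwise the FILES half of the last id is dropped here and picked
 * up by the next call (via q2c_blocks_done).
 */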
1016
1017 state->maxids = (state->maxkeyvals + 1) / 2;
1018 state->numids = 0;
1019 if (state->maxkeyvals % 2) {
1020 if (blocks_done) {
1021 state->skipfirst = 1;
1022 state->skiplast = 0;
1023 } else {
1024 state->skipfirst = 0;
1025 state->skiplast = 1;
1026 }
1027 } else {
1028 state->skipfirst = 0;
1029 state->skiplast = 0;
1030 }
1031 }
1032
1033 /*
1034 * Choose which idtype we're going to work on. If doing a full
1035 * iteration, we do users first, then groups, but either might be
1036 * disabled or marked to skip via cursorskipidtype(), so don't make
1037 * silly assumptions.
1038 */
1039 static int
1040 q2cursor_pickidtype(struct ulfsq2_cursor *cursor, int *idtype_ret)
1041 {
1042 if (cursor->q2c_users_done == 0) {
1043 *idtype_ret = QUOTA_IDTYPE_USER;
1044 } else if (cursor->q2c_groups_done == 0) {
1045 *idtype_ret = QUOTA_IDTYPE_GROUP;
1046 } else {
1047 return EAGAIN;
1048 }
1049 return 0;
1050 }
1051
1052 /*
1053 * Add an ID to the current state. Sets up either one or two keys to
1054 * refer to it, depending on whether it's first/last and the setting
1055 * of skipfirst. (skiplast does not need to be explicitly tested)
1056 */
1057 static void
1058 q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
1059 {
1060 KASSERT(state->numids < state->maxids);
1061 KASSERT(state->numkeys < state->maxkeyvals);
1062
1063 if (!state->skipfirst || state->numkeys > 0) {
1064 state->keys[state->numkeys].qk_idtype = idtype;
1065 state->keys[state->numkeys].qk_id = id;
1066 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
1067 state->numkeys++;
1068 }
1069 if (state->numkeys < state->maxkeyvals) {
1070 state->keys[state->numkeys].qk_idtype = idtype;
1071 state->keys[state->numkeys].qk_id = id;
1072 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
1073 state->numkeys++;
1074 } else {
1075 KASSERT(state->skiplast);
1076 }
1077 state->numids++;
1078 }
1079
1080 /*
1081 * Callback function for getting IDs. Update counting and call addid.
1082 */
1083 static int
1084 q2cursor_getids_callback(struct ulfsmount *ump, uint64_t *offp,
1085 struct quota2_entry *q2ep, uint64_t off, void *v)
1086 {
1087 struct q2cursor_getids *gi = v;
1088 id_t id;
1089 #ifdef LFS_EI
1090 const int needswap = ULFS_MPNEEDSWAP(ump);
1091 #endif
1092
1093 if (gi->skipped < gi->skip) {
1094 gi->skipped++;
1095 return 0;
1096 }
1097 id = ulfs_rw32(q2ep->q2e_uid, needswap);
1098 q2cursor_addid(gi->state, gi->idtype, id);
1099 gi->new_skip++;
1100 if (gi->state->numids >= gi->state->maxids) {
1101 /* got enough ids, stop now */
1102 gi->stopped = 1;
1103 return Q2WL_ABORT;
1104 }
1105 return 0;
1106 }
1107
1108 /*
1109 * Fill in a batch of quotakeys by scanning one or more hash chains.
1110 */
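/*
 * The scan order is: the default entry first (if not yet returned),
 * then the hash chains starting at q2c_hashpos, skipping the
 * q2c_uidpos ids already returned from the current chain.  If the
 * on-disk hash table changed size since the cursor last looked,
 * EDEADLK is returned so the caller can start over.
 */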
1111 static int
1112 q2cursor_getkeys(struct ulfsmount *ump, int idtype, struct ulfsq2_cursor *cursor,
1113 struct q2cursor_state *state,
1114 int *hashsize_ret, struct quota2_entry *default_q2e_ret)
1115 {
1116 const int needswap = ULFS_MPNEEDSWAP(ump);
1117 struct buf *hbp;
1118 struct quota2_header *q2h;
1119 int quota2_hash_size;
1120 struct q2cursor_getids gi;
1121 uint64_t offset;
1122 int error;
1123
1124 /*
1125 * Read the header block.
1126 */
1127
1128 mutex_enter(&lfs_dqlock);
1129 error = getq2h(ump, idtype, &hbp, &q2h, 0);
1130 if (error) {
1131 mutex_exit(&lfs_dqlock);
1132 return error;
1133 }
1134
1135 /* if the table size has changed, make the caller start over */
1136 quota2_hash_size = ulfs_rw16(q2h->q2h_hash_size, needswap);
1137 if (cursor->q2c_hashsize == 0) {
1138 cursor->q2c_hashsize = quota2_hash_size;
1139 } else if (cursor->q2c_hashsize != quota2_hash_size) {
1140 error = EDEADLK;
1141 goto scanfail;
1142 }
1143
1144 /* grab the entry with the default values out of the header */
1145 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);
1146
1147 /* If we haven't done the defaults yet, that goes first. */
1148 if (cursor->q2c_defaults_done == 0) {
1149 q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
1150 /* if we read both halves, mark it done */
1151 if (state->numids < state->maxids || !state->skiplast) {
1152 cursor->q2c_defaults_done = 1;
1153 }
1154 }
1155
1156 gi.state = state;
1157 gi.idtype = idtype;
1158
1159 while (state->numids < state->maxids) {
1160 if (cursor->q2c_hashpos >= quota2_hash_size) {
1161 /* nothing more left */
1162 break;
1163 }
1164
1165 /* scan this hash chain */
1166 gi.skip = cursor->q2c_uidpos;
1167 gi.new_skip = gi.skip;
1168 gi.skipped = 0;
1169 gi.stopped = 0;
1170 offset = q2h->q2h_entries[cursor->q2c_hashpos];
1171
1172 error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
1173 q2cursor_getids_callback);
1174 KASSERT(error != Q2WL_ABORT);
1175 if (error) {
1176 break;
1177 }
1178 if (gi.stopped) {
1179 /* callback stopped before reading whole chain */
1180 cursor->q2c_uidpos = gi.new_skip;
1181 /* if we didn't get both halves, back up */
1182 if (state->numids == state->maxids && state->skiplast){
1183 KASSERT(cursor->q2c_uidpos > 0);
1184 cursor->q2c_uidpos--;
1185 }
1186 } else {
1187 /* read whole chain */
1188 /* if we got both halves of the last id, advance */
1189 if (state->numids < state->maxids || !state->skiplast){
1190 cursor->q2c_uidpos = 0;
1191 cursor->q2c_hashpos++;
1192 }
1193 }
1194 }
1195
1196 scanfail:
1197 mutex_exit(&lfs_dqlock);
1198 brelse(hbp, 0);
1199 if (error)
1200 return error;
1201
1202 *hashsize_ret = quota2_hash_size;
1203 return 0;
1204 }
1205
1206 /*
1207 * Fetch the quotavals for the quotakeys.
1208 */
1209 static int
1210 q2cursor_getvals(struct ulfsmount *ump, struct q2cursor_state *state,
1211 const struct quota2_entry *default_q2e)
1212 {
1213 int hasid;
1214 id_t loadedid, id;
1215 unsigned pos;
1216 struct quota2_entry q2e;
1217 int objtype;
1218 int error;
1219
1220 hasid = 0;
1221 loadedid = 0;
1222 for (pos = 0; pos < state->numkeys; pos++) {
1223 id = state->keys[pos].qk_id;
1224 if (!hasid || id != loadedid) {
1225 hasid = 1;
1226 loadedid = id;
1227 if (id == QUOTA_DEFAULTID) {
1228 q2e = *default_q2e;
1229 } else {
1230 error = quota2_fetch_q2e(ump,
1231 &state->keys[pos],
1232 &q2e);
1233 if (error == ENOENT) {
1234 /* something changed - start over */
1235 error = EDEADLK;
1236 }
1237 if (error) {
1238 return error;
1239 }
1240 }
1241 }
1242
1243
1244 objtype = state->keys[pos].qk_objtype;
1245 KASSERT(objtype >= 0 && objtype < N_QL);
1246 q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
1247 }
1248
1249 return 0;
1250 }
1251
1252 /*
1253 * Handle cursorget.
1254 *
1255 * We can't just read keys and values directly, because we can't walk
1256 * the list with dqlock and grab dq_interlock to read the entries at
1257 * the same time. So we're going to do two passes: one to figure out
1258 * which IDs we want and fill in the keys, and then a second to use
1259 * the keys to fetch the values.
1260 */
1261 int
1262 lfsquota2_handle_cmd_cursorget(struct ulfsmount *ump, struct quotakcursor *qkc,
1263 struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
1264 unsigned *ret)
1265 {
1266 int error;
1267 struct ulfsq2_cursor *cursor;
1268 struct ulfsq2_cursor newcursor;
1269 struct q2cursor_state state;
1270 struct quota2_entry default_q2e;
1271 int idtype;
1272 int quota2_hash_size;
1273
1274 /*
1275 * Convert and validate the cursor.
1276 */
1277 cursor = Q2CURSOR(qkc);
1278 error = q2cursor_check(cursor);
1279 if (error) {
1280 return error;
1281 }
1282
1283 /*
1284 * Make sure our on-disk codes match the values of the
1285 * FS-independent ones. This avoids the need for explicit
1286 * conversion (which would be a NOP anyway and thus easily
1287 * left out or called in the wrong places...)
1288 */
1289 CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
1290 CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
1291 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
1292 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
1293
1294 /*
1295 * If some of the idtypes aren't configured/enabled, arrange
1296 * to skip over them.
1297 */
1298 if (cursor->q2c_users_done == 0 &&
1299 ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1300 cursor->q2c_users_done = 1;
1301 }
1302 if (cursor->q2c_groups_done == 0 &&
1303 ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1304 cursor->q2c_groups_done = 1;
1305 }
1306
1307 /* Loop over, potentially, both idtypes */
1308 while (1) {
1309
1310 /* Choose id type */
1311 error = q2cursor_pickidtype(cursor, &idtype);
1312 if (error == EAGAIN) {
1313 /* nothing more to do, return 0 */
1314 *ret = 0;
1315 return 0;
1316 }
1317 KASSERT(ump->um_quotas[idtype] != NULLVP);
1318
1319 /*
1320 * Initialize the per-call iteration state. Copy the
1321 * cursor state so we can update it in place but back
1322 * out on error.
1323 */
1324 q2cursor_initstate(&state, keys, vals, maxreturn,
1325 cursor->q2c_blocks_done);
1326 newcursor = *cursor;
1327
1328 /* Assign keys */
1329 error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
1330 &quota2_hash_size, &default_q2e);
1331 if (error) {
1332 return error;
1333 }
1334
1335 /* Now fill in the values. */
1336 error = q2cursor_getvals(ump, &state, &default_q2e);
1337 if (error) {
1338 return error;
1339 }
1340
1341 /*
1342 * Now that we aren't going to fail and lose what we
1343 * did so far, we can update the cursor state.
1344 */
1345
1346 if (newcursor.q2c_hashpos >= quota2_hash_size) {
1347 if (idtype == QUOTA_IDTYPE_USER)
1348 cursor->q2c_users_done = 1;
1349 else
1350 cursor->q2c_groups_done = 1;
1351
1352 /* start over on another id type */
1353 cursor->q2c_hashsize = 0;
1354 cursor->q2c_defaults_done = 0;
1355 cursor->q2c_hashpos = 0;
1356 cursor->q2c_uidpos = 0;
1357 cursor->q2c_blocks_done = 0;
1358 } else {
1359 *cursor = newcursor;
1360 cursor->q2c_blocks_done = state.skiplast;
1361 }
1362
1363 /*
1364 * If we have something to return, return it.
1365 * Otherwise, continue to the other idtype, if any,
1366 * and only return zero at end of iteration.
1367 */
1368 if (state.numkeys > 0) {
1369 break;
1370 }
1371 }
1372
1373 *ret = state.numkeys;
1374 return 0;
1375 }
1376
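/*
 * Expected calling sequence (a sketch; the quotactl code above the
 * filesystem drives this in practice): cursoropen initializes the
 * caller-held cursor, cursorget is called repeatedly until cursoratend
 * reports both id types done (or until EDEADLK, presumably followed by
 * cursorrewind to restart the iteration), and cursorclose only
 * revalidates the cursor.
 */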
1377 int
1378 lfsquota2_handle_cmd_cursoropen(struct ulfsmount *ump, struct quotakcursor *qkc)
1379 {
1380 struct ulfsq2_cursor *cursor;
1381
1382 CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
1383 cursor = Q2CURSOR(qkc);
1384
1385 cursor->q2c_magic = Q2C_MAGIC;
1386 cursor->q2c_hashsize = 0;
1387
1388 cursor->q2c_users_done = 0;
1389 cursor->q2c_groups_done = 0;
1390 cursor->q2c_defaults_done = 0;
1391 cursor->q2c_hashpos = 0;
1392 cursor->q2c_uidpos = 0;
1393 cursor->q2c_blocks_done = 0;
1394 return 0;
1395 }
1396
1397 int
1398 lfsquota2_handle_cmd_cursorclose(struct ulfsmount *ump, struct quotakcursor *qkc)
1399 {
1400 struct ulfsq2_cursor *cursor;
1401 int error;
1402
1403 cursor = Q2CURSOR(qkc);
1404 error = q2cursor_check(cursor);
1405 if (error) {
1406 return error;
1407 }
1408
1409 /* nothing to do */
1410
1411 return 0;
1412 }
1413
1414 int
1415 lfsquota2_handle_cmd_cursorskipidtype(struct ulfsmount *ump,
1416 struct quotakcursor *qkc, int idtype)
1417 {
1418 struct ulfsq2_cursor *cursor;
1419 int error;
1420
1421 cursor = Q2CURSOR(qkc);
1422 error = q2cursor_check(cursor);
1423 if (error) {
1424 return error;
1425 }
1426
1427 switch (idtype) {
1428 case QUOTA_IDTYPE_USER:
1429 cursor->q2c_users_done = 1;
1430 break;
1431 case QUOTA_IDTYPE_GROUP:
1432 cursor->q2c_groups_done = 1;
1433 break;
1434 default:
1435 return EINVAL;
1436 }
1437
1438 return 0;
1439 }
1440
1441 int
1442 lfsquota2_handle_cmd_cursoratend(struct ulfsmount *ump, struct quotakcursor *qkc,
1443 int *ret)
1444 {
1445 struct ulfsq2_cursor *cursor;
1446 int error;
1447
1448 cursor = Q2CURSOR(qkc);
1449 error = q2cursor_check(cursor);
1450 if (error) {
1451 return error;
1452 }
1453
1454 *ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
1455 return 0;
1456 }
1457
1458 int
1459 lfsquota2_handle_cmd_cursorrewind(struct ulfsmount *ump, struct quotakcursor *qkc)
1460 {
1461 struct ulfsq2_cursor *cursor;
1462 int error;
1463
1464 cursor = Q2CURSOR(qkc);
1465 error = q2cursor_check(cursor);
1466 if (error) {
1467 return error;
1468 }
1469
1470 cursor->q2c_hashsize = 0;
1471
1472 cursor->q2c_users_done = 0;
1473 cursor->q2c_groups_done = 0;
1474 cursor->q2c_defaults_done = 0;
1475 cursor->q2c_hashpos = 0;
1476 cursor->q2c_uidpos = 0;
1477 cursor->q2c_blocks_done = 0;
1478
1479 return 0;
1480 }
1481
1482 int
1483 lfs_q2sync(struct mount *mp)
1484 {
1485 return 0;
1486 }
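/*
 * lfs_q2sync() above and lfs_dq2sync() below are no-ops, presumably
 * because quota2 updates are written through the buffer cache via
 * quota2_bwrite() as they are made, leaving nothing extra to flush.
 */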
1487
1488 struct dq2get_callback {
1489 uid_t id;
1490 struct dquot *dq;
1491 };
1492
1493 static int
1494 dq2get_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
1495 uint64_t off, void *v)
1496 {
1497 struct dq2get_callback *c = v;
1498 daddr_t lblkno;
1499 int blkoff;
1500 #ifdef LFS_EI
1501 const int needswap = ULFS_MPNEEDSWAP(ump);
1502 #endif
1503
1504 if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
1505 KASSERT(mutex_owned(&c->dq->dq_interlock));
1506 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
1507 blkoff = (off & ump->umq2_bmask);
1508 c->dq->dq2_lblkno = lblkno;
1509 c->dq->dq2_blkoff = blkoff;
1510 return Q2WL_ABORT;
1511 }
1512 return 0;
1513 }
1514
1515 int
1516 lfs_dq2get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
1517 struct dquot *dq)
1518 {
1519 struct buf *bp;
1520 struct quota2_header *q2h;
1521 int error;
1522 daddr_t offset;
1523 u_long hash_mask;
1524 struct dq2get_callback c = {
1525 .id = id,
1526 .dq = dq
1527 };
1528
1529 KASSERT(mutex_owned(&dq->dq_interlock));
1530 mutex_enter(&lfs_dqlock);
1531 error = getq2h(ump, type, &bp, &q2h, 0);
1532 if (error)
1533 goto out_mutex;
1534 /* look for our entry */
1535 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
1536 offset = q2h->q2h_entries[id & hash_mask];
1537 error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
1538 dq2get_callback);
1539 brelse(bp, 0);
1540 out_mutex:
1541 mutex_exit(&lfs_dqlock);
1542 return error;
1543 }
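/*
 * Note that not finding the id above is not an error: dq2_lblkno and
 * dq2_blkoff stay zero and the on-disk entry is allocated later by
 * quota2_q2ealloc() when it is first needed.
 */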
1544
1545 int
1546 lfs_dq2sync(struct vnode *vp, struct dquot *dq)
1547 {
1548 return 0;
1549 }
1550