1 /* $NetBSD: ulfs_quota2.c,v 1.6 2013/06/08 22:05:15 dholland Exp $ */
2 /* from NetBSD: ufs_quota2.c,v 1.35 2012/09/27 07:47:56 bouyer Exp */
3
4 /*-
5 * Copyright (c) 2010 Manuel Bouyer
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.6 2013/06/08 22:05:15 dholland Exp $");
32
33 #include <sys/buf.h>
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/systm.h>
37 #include <sys/namei.h>
38 #include <sys/file.h>
39 #include <sys/proc.h>
40 #include <sys/vnode.h>
41 #include <sys/mount.h>
42 #include <sys/fstrans.h>
43 #include <sys/kauth.h>
44 #include <sys/wapbl.h>
45 #include <sys/quota.h>
46 #include <sys/quotactl.h>
47
48 #include <ufs/lfs/ulfs_quota2.h>
49 #include <ufs/lfs/ulfs_inode.h>
50 #include <ufs/lfs/ulfsmount.h>
51 #include <ufs/lfs/ulfs_bswap.h>
52 #include <ufs/lfs/ulfs_extern.h>
53 #include <ufs/lfs/ulfs_quota.h>
54
55 /*
56 * LOCKING:
57 * Data in the entries are protected by the associated struct dquot's
58 * dq_interlock (this means we can't read or change a quota entry without
59 * grabbing a dquot for it).
60 * The header and lists (including pointers in the data entries, and q2e_uid)
61 * are protected by the global dqlock.
62 * The locking order is dq_interlock -> dqlock.
63 */
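/*
 * For illustration, the allocation path in getinoquota2() below follows
 * that order:
 *
 *	mutex_enter(&dq->dq_interlock);
 *	...
 *	mutex_enter(&lfs_dqlock);
 *	error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
 *	mutex_exit(&lfs_dqlock);
 *	...
 *	mutex_exit(&dq->dq_interlock);
 */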
64
65 static int quota2_bwrite(struct mount *, struct buf *);
66 static int getinoquota2(struct inode *, bool, bool, struct buf **,
67 struct quota2_entry **);
68 static int getq2h(struct ulfsmount *, int, struct buf **,
69 struct quota2_header **, int);
70 static int getq2e(struct ulfsmount *, int, daddr_t, int, struct buf **,
71 struct quota2_entry **, int);
72 static int quota2_walk_list(struct ulfsmount *, struct buf *, int,
73 uint64_t *, int, void *,
74 int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *,
75 uint64_t, void *));
76
77 static const char *limnames[] = INITQLNAMES;
78
79 static void
80 quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
81 struct quota2_entry *q2e)
82 {
83 /* make sure we can index q2e_val[] by the fs-independent objtype */
84 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
85 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
86
87 q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
88 q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
89 q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
90 }
91
92 /*
93 * Convert internal representation to FS-independent representation.
94 * (Note that while the two types are currently identical, the
95 * internal representation is an on-disk struct and the FS-independent
96 * representation is not, and they might diverge in the future.)
97 */
98 static void
99 q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
100 {
101 qv->qv_softlimit = q2v->q2v_softlimit;
102 qv->qv_hardlimit = q2v->q2v_hardlimit;
103 qv->qv_usage = q2v->q2v_cur;
104 qv->qv_expiretime = q2v->q2v_time;
105 qv->qv_grace = q2v->q2v_grace;
106 }
107
108 /*
109 * Convert a quota2entry and default-flag to the FS-independent
110 * representation.
111 */
112 static void
113 q2e_to_quotaval(struct quota2_entry *q2e, int def,
114 id_t *id, int objtype, struct quotaval *ret)
115 {
116 if (def) {
117 *id = QUOTA_DEFAULTID;
118 } else {
119 *id = q2e->q2e_uid;
120 }
121
122 KASSERT(objtype >= 0 && objtype < N_QL);
123 q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
124 }
125
126
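/*
 * Write back a quota file buffer: synchronously if the file system is
 * mounted MNT_SYNCHRONOUS, as a delayed write otherwise.
 */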
127 static int
128 quota2_bwrite(struct mount *mp, struct buf *bp)
129 {
130 if (mp->mnt_flag & MNT_SYNCHRONOUS)
131 return bwrite(bp);
132 else {
133 bdwrite(bp);
134 return 0;
135 }
136 }
137
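/*
 * Read the header block of the type's quota file and return the buffer
 * along with a pointer to the header inside it.  The caller must hold
 * lfs_dqlock.
 */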
138 static int
139 getq2h(struct ulfsmount *ump, int type,
140 struct buf **bpp, struct quota2_header **q2hp, int flags)
141 {
142 #ifdef LFS_EI
143 const int needswap = ULFS_MPNEEDSWAP(ump);
144 #endif
145 int error;
146 struct buf *bp;
147 struct quota2_header *q2h;
148
149 KASSERT(mutex_owned(&lfs_dqlock));
150 error = bread(ump->um_quotas[type], 0, ump->umq2_bsize,
151 ump->um_cred[type], flags, &bp);
152 if (error)
153 return error;
154 if (bp->b_resid != 0)
155 panic("dq2get: %s quota file truncated", lfs_quotatypes[type]);
156
157 q2h = (void *)bp->b_data;
158 if (ulfs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
159 q2h->q2h_type != type)
160 panic("dq2get: corrupted %s quota header", lfs_quotatypes[type]);
161 *bpp = bp;
162 *q2hp = q2h;
163 return 0;
164 }
165
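/*
 * Read the quota file block containing the entry at (lblkno, blkoffset)
 * and return the buffer along with a pointer to the entry inside it.
 */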
166 static int
167 getq2e(struct ulfsmount *ump, int type, daddr_t lblkno, int blkoffset,
168 struct buf **bpp, struct quota2_entry **q2ep, int flags)
169 {
170 int error;
171 struct buf *bp;
172
173 if (blkoffset & (sizeof(uint64_t) - 1)) {
174 panic("dq2get: %s quota file corrupted",
175 lfs_quotatypes[type]);
176 }
177 error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize,
178 ump->um_cred[type], flags, &bp);
179 if (error)
180 return error;
181 if (bp->b_resid != 0) {
182 panic("dq2get: %s quota file corrupted",
183 lfs_quotatypes[type]);
184 }
185 *q2ep = (void *)((char *)bp->b_data + blkoffset);
186 *bpp = bp;
187 return 0;
188 }
189
190 /* walk a quota entry list, calling the callback for each entry */
191 #define Q2WL_ABORT 0x10000000
192
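/*
 * The callback is invoked once per entry on the chain.  It may modify the
 * entry, or rewrite *offp (the on-disk pointer that led to the entry) to
 * unlink it, as dq2clear_callback() does; returning Q2WL_ABORT stops the
 * walk early and is reported to the caller as success.  A minimal
 * counting callback (illustrative only, not part of this file) could be:
 *
 *	static int
 *	count_cb(struct ulfsmount *ump, uint64_t *offp,
 *	    struct quota2_entry *q2e, uint64_t off, void *v)
 *	{
 *		(*(unsigned *)v)++;
 *		return 0;
 *	}
 */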
193 static int
194 quota2_walk_list(struct ulfsmount *ump, struct buf *hbp, int type,
195 uint64_t *offp, int flags, void *a,
196 int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
197 {
198 #ifdef LFS_EI
199 const int needswap = ULFS_MPNEEDSWAP(ump);
200 #endif
201 daddr_t off = ulfs_rw64(*offp, needswap);
202 struct buf *bp, *obp = hbp;
203 int ret = 0, ret2 = 0;
204 struct quota2_entry *q2e;
205 daddr_t lblkno, blkoff, olblkno = 0;
206
207 KASSERT(mutex_owned(&lfs_dqlock));
208
209 while (off != 0) {
210 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
211 blkoff = (off & ump->umq2_bmask);
212 if (lblkno == 0) {
213 /* in the header block */
214 bp = hbp;
215 } else if (lblkno == olblkno) {
216 /* still in the same buf */
217 bp = obp;
218 } else {
219 ret = bread(ump->um_quotas[type], lblkno,
220 ump->umq2_bsize,
221 ump->um_cred[type], flags, &bp);
222 if (ret)
223 return ret;
224 if (bp->b_resid != 0) {
225 panic("quota2_walk_list: %s quota file corrupted",
226 lfs_quotatypes[type]);
227 }
228 }
229 q2e = (void *)((char *)(bp->b_data) + blkoff);
230 ret = (*func)(ump, offp, q2e, off, a);
231 if (off != ulfs_rw64(*offp, needswap)) {
232 /* callback changed parent's pointer, redo */
233 off = ulfs_rw64(*offp, needswap);
234 if (bp != hbp && bp != obp)
235 ret2 = bwrite(bp);
236 } else {
237 /* parent is now current */
238 if (obp != bp && obp != hbp) {
239 if (flags & B_MODIFY)
240 ret2 = bwrite(obp);
241 else
242 brelse(obp, 0);
243 }
244 obp = bp;
245 olblkno = lblkno;
246 offp = &(q2e->q2e_next);
247 off = ulfs_rw64(*offp, needswap);
248 }
249 if (ret)
250 break;
251 if (ret2) {
252 ret = ret2;
253 break;
254 }
255 }
256 if (obp != hbp) {
257 if (flags & B_MODIFY)
258 ret2 = bwrite(obp);
259 else
260 brelse(obp, 0);
261 }
262 if (ret & Q2WL_ABORT)
263 return 0;
264 if (ret == 0)
265 return ret2;
266 return ret;
267 }
268
269 int
270 lfsquota2_umount(struct mount *mp, int flags)
271 {
272 int i, error;
273 struct ulfsmount *ump = VFSTOULFS(mp);
274
275 if ((ump->um_flags & ULFS_QUOTA2) == 0)
276 return 0;
277
278 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
279 if (ump->um_quotas[i] != NULLVP) {
280 error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
281 ump->um_cred[i]);
282 if (error) {
283 printf("quota2_umount failed: close(%p) %d\n",
284 ump->um_quotas[i], error);
285 return error;
286 }
287 }
288 ump->um_quotas[i] = NULLVP;
289 }
290 return 0;
291 }
292
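/*
 * Allocate an on-disk quota entry for the given id: take an entry off the
 * header's free list (growing the quota file by one block if the free
 * list is empty), initialize it from the default entry and link it into
 * the hash chain for the id.  Called with dq_interlock and lfs_dqlock
 * held.
 */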
293 static int
294 quota2_q2ealloc(struct ulfsmount *ump, int type, uid_t uid, struct dquot *dq)
295 {
296 int error, error2;
297 struct buf *hbp, *bp;
298 struct quota2_header *q2h;
299 struct quota2_entry *q2e;
300 daddr_t offset;
301 u_long hash_mask;
302 const int needswap = ULFS_MPNEEDSWAP(ump);
303
304 KASSERT(mutex_owned(&dq->dq_interlock));
305 KASSERT(mutex_owned(&lfs_dqlock));
306 error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
307 if (error)
308 return error;
309 offset = ulfs_rw64(q2h->q2h_free, needswap);
310 if (offset == 0) {
311 struct vnode *vp = ump->um_quotas[type];
312 struct inode *ip = VTOI(vp);
313 uint64_t size = ip->i_size;
314 /* need to allocate a new disk block */
315 error = ULFS_BALLOC(vp, size, ump->umq2_bsize,
316 ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
317 if (error) {
318 brelse(hbp, 0);
319 return error;
320 }
321 KASSERT((ip->i_size % ump->umq2_bsize) == 0);
322 ip->i_size += ump->umq2_bsize;
323 DIP_ASSIGN(ip, size, ip->i_size);
324 ip->i_flag |= IN_CHANGE | IN_UPDATE;
325 uvm_vnp_setsize(vp, ip->i_size);
326 lfsquota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
327 needswap);
328 error = bwrite(bp);
329 error2 = ULFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
330 if (error || error2) {
331 brelse(hbp, 0);
332 if (error)
333 return error;
334 return error2;
335 }
336 offset = ulfs_rw64(q2h->q2h_free, needswap);
337 KASSERT(offset != 0);
338 }
339 dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
340 dq->dq2_blkoff = (offset & ump->umq2_bmask);
341 if (dq->dq2_lblkno == 0) {
342 bp = hbp;
343 q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
344 } else {
345 error = getq2e(ump, type, dq->dq2_lblkno,
346 dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
347 if (error) {
348 brelse(hbp, 0);
349 return error;
350 }
351 }
352 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
353 /* remove from free list */
354 q2h->q2h_free = q2e->q2e_next;
355
356 memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
357 q2e->q2e_uid = ulfs_rw32(uid, needswap);
358 /* insert in hash list */
359 q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
360 q2h->q2h_entries[uid & hash_mask] = ulfs_rw64(offset, needswap);
361 if (hbp != bp) {
362 bwrite(hbp);
363 }
364 bwrite(bp);
365 return 0;
366 }
367
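/*
 * Look up the quota entries of an inode for all quota types.  Each active
 * dquot's dq_interlock is taken, and the buffer and entry pointers are
 * returned in bpp[] and q2ep[]; when "alloc" is set a missing on-disk
 * entry is created first, and "modify" makes the blocks be read with
 * B_MODIFY.
 */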
368 static int
369 getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
370 struct quota2_entry **q2ep)
371 {
372 int error;
373 int i;
374 struct dquot *dq;
375 struct ulfsmount *ump = ip->i_ump;
376 u_int32_t ino_ids[ULFS_MAXQUOTAS];
377
378 error = lfs_getinoquota(ip);
379 if (error)
380 return error;
381
382 ino_ids[ULFS_USRQUOTA] = ip->i_uid;
383 ino_ids[ULFS_GRPQUOTA] = ip->i_gid;
384 /* first get the interlock for all dquot */
385 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
386 dq = ip->i_dquot[i];
387 if (dq == NODQUOT)
388 continue;
389 mutex_enter(&dq->dq_interlock);
390 }
391 /* now get the corresponding quota entry */
392 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
393 bpp[i] = NULL;
394 q2ep[i] = NULL;
395 dq = ip->i_dquot[i];
396 if (dq == NODQUOT)
397 continue;
398 if (__predict_false(ump->um_quotas[i] == NULL)) {
399 /*
400 * quotas have been turned off. This can happen
401 * at umount time.
402 */
403 mutex_exit(&dq->dq_interlock);
404 lfs_dqrele(NULLVP, dq);
405 ip->i_dquot[i] = NULL;
406 continue;
407 }
408
409 if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
410 if (!alloc) {
411 continue;
412 }
413 /* need to allocate a new on-disk quota entry */
414 mutex_enter(&lfs_dqlock);
415 error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
416 mutex_exit(&lfs_dqlock);
417 if (error)
418 return error;
419 }
420 KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
421 error = getq2e(ump, i, dq->dq2_lblkno,
422 dq->dq2_blkoff, &bpp[i], &q2ep[i],
423 modify ? B_MODIFY : 0);
424 if (error)
425 return error;
426 }
427 return 0;
428 }
429
430 __inline static int __unused
431 lfsquota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
432 {
433 return lfsquota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
434 q2v->q2v_hardlimit, q2v->q2v_time, now);
435 }
436
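/*
 * Apply a block or inode count change (selected by vtype) to the inode's
 * quotas.  Usage decreases are always applied; for increases the soft and
 * hard limits are enforced, unless the caller passed FORCE or holds the
 * no-limit kauth privilege, and EDQUOT is returned when a limit is hit.
 */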
437 static int
438 quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
439 int flags)
440 {
441 int error;
442 struct buf *bp[ULFS_MAXQUOTAS];
443 struct quota2_entry *q2e[ULFS_MAXQUOTAS];
444 struct quota2_val *q2vp;
445 struct dquot *dq;
446 uint64_t ncurblks;
447 struct ulfsmount *ump = ip->i_ump;
448 struct mount *mp = ump->um_mountp;
449 const int needswap = ULFS_MPNEEDSWAP(ump);
450 int i;
451
452 if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
453 return error;
454 if (change == 0) {
455 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
456 dq = ip->i_dquot[i];
457 if (dq == NODQUOT)
458 continue;
459 if (bp[i])
460 brelse(bp[i], 0);
461 mutex_exit(&dq->dq_interlock);
462 }
463 return 0;
464 }
465 if (change < 0) {
466 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
467 dq = ip->i_dquot[i];
468 if (dq == NODQUOT)
469 continue;
470 if (q2e[i] == NULL) {
471 mutex_exit(&dq->dq_interlock);
472 continue;
473 }
474 q2vp = &q2e[i]->q2e_val[vtype];
475 ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
476 if (ncurblks < -change)
477 ncurblks = 0;
478 else
479 ncurblks += change;
480 q2vp->q2v_cur = ulfs_rw64(ncurblks, needswap);
481 quota2_bwrite(mp, bp[i]);
482 mutex_exit(&dq->dq_interlock);
483 }
484 return 0;
485 }
486 /* see if the allocation is allowed */
487 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
488 struct quota2_val q2v;
489 int ql_stat;
490 dq = ip->i_dquot[i];
491 if (dq == NODQUOT)
492 continue;
493 KASSERT(q2e[i] != NULL);
494 lfsquota2_ulfs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
495 ql_stat = lfsquota2_check_limit(&q2v, change, time_second);
496
497 if ((flags & FORCE) == 0 &&
498 kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
499 KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
500 KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
501 /* enforce this limit */
502 switch(QL_STATUS(ql_stat)) {
503 case QL_S_DENY_HARD:
504 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
505 uprintf("\n%s: write failed, %s %s "
506 "limit reached\n",
507 mp->mnt_stat.f_mntonname,
508 lfs_quotatypes[i], limnames[vtype]);
509 dq->dq_flags |= DQ_WARN(vtype);
510 }
511 error = EDQUOT;
512 break;
513 case QL_S_DENY_GRACE:
514 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
515 uprintf("\n%s: write failed, %s %s "
516 "limit reached\n",
517 mp->mnt_stat.f_mntonname,
518 lfs_quotatypes[i], limnames[vtype]);
519 dq->dq_flags |= DQ_WARN(vtype);
520 }
521 error = EDQUOT;
522 break;
523 case QL_S_ALLOW_SOFT:
524 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
525 uprintf("\n%s: warning, %s %s "
526 "quota exceeded\n",
527 mp->mnt_stat.f_mntonname,
528 lfs_quotatypes[i], limnames[vtype]);
529 dq->dq_flags |= DQ_WARN(vtype);
530 }
531 break;
532 }
533 }
534 /*
535 * Always do this; we don't know if the allocation will
536 * succeed or not in the end, and if we don't do the
537 * allocation q2v_time will be ignored anyway.
538 */
539 if (ql_stat & QL_F_CROSS) {
540 q2v.q2v_time = time_second + q2v.q2v_grace;
541 lfsquota2_ulfs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
542 needswap);
543 }
544 }
545
546 /* now do the allocation if allowed */
547 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
548 dq = ip->i_dquot[i];
549 if (dq == NODQUOT)
550 continue;
551 KASSERT(q2e[i] != NULL);
552 if (error == 0) {
553 q2vp = &q2e[i]->q2e_val[vtype];
554 ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
555 q2vp->q2v_cur = ulfs_rw64(ncurblks + change, needswap);
556 quota2_bwrite(mp, bp[i]);
557 } else
558 brelse(bp[i], 0);
559 mutex_exit(&dq->dq_interlock);
560 }
561 return error;
562 }
563
564 int
565 lfs_chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
566 {
567 return quota2_check(ip, QL_BLOCK, change, cred, flags);
568 }
569
570 int
571 lfs_chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
572 {
573 return quota2_check(ip, QL_FILE, change, cred, flags);
574 }
575
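/*
 * Set the limits for one (idtype, id, objtype) key.  QUOTA_DEFAULTID
 * updates the default entry in the quota file header; for other ids an
 * on-disk entry is allocated on demand.
 */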
576 int
577 lfsquota2_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
578 const struct quotaval *val)
579 {
580 int error;
581 struct dquot *dq;
582 struct quota2_header *q2h;
583 struct quota2_entry q2e, *q2ep;
584 struct buf *bp;
585 const int needswap = ULFS_MPNEEDSWAP(ump);
586
587 /* make sure we can index by the fs-independent idtype */
588 CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
589 CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
590
591 if (ump->um_quotas[key->qk_idtype] == NULLVP)
592 return ENODEV;
593
594 if (key->qk_id == QUOTA_DEFAULTID) {
595 mutex_enter(&lfs_dqlock);
596 error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
597 if (error) {
598 mutex_exit(&lfs_dqlock);
599 goto out_wapbl;
600 }
601 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
602 quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
603 lfsquota2_ulfs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
604 mutex_exit(&lfs_dqlock);
605 quota2_bwrite(ump->um_mountp, bp);
606 goto out_wapbl;
607 }
608
609 error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
610 if (error)
611 goto out_wapbl;
612
613 mutex_enter(&dq->dq_interlock);
614 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
615 /* need to allocate a new on-disk quota entry */
616 mutex_enter(&lfs_dqlock);
617 error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
618 mutex_exit(&lfs_dqlock);
619 if (error)
620 goto out_il;
621 }
622 KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
623 error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
624 dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
625 if (error)
626 goto out_il;
627
628 lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
629 quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
630 lfsquota2_ulfs_rwq2e(&q2e, q2ep, needswap);
631 quota2_bwrite(ump->um_mountp, bp);
632
633 out_il:
634 mutex_exit(&dq->dq_interlock);
635 lfs_dqrele(NULLVP, dq);
636 out_wapbl:
637 return error;
638 }
639
640 struct dq2clear_callback {
641 uid_t id;
642 struct dquot *dq;
643 struct quota2_header *q2h;
644 };
645
646 static int
647 dq2clear_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
648 uint64_t off, void *v)
649 {
650 struct dq2clear_callback *c = v;
651 #ifdef LFS_EI
652 const int needswap = ULFS_MPNEEDSWAP(ump);
653 #endif
654 uint64_t myoff;
655
656 if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
657 KASSERT(mutex_owned(&c->dq->dq_interlock));
658 c->dq->dq2_lblkno = 0;
659 c->dq->dq2_blkoff = 0;
660 myoff = *offp;
661 /* remove from hash list */
662 *offp = q2e->q2e_next;
663 /* add to free list */
664 q2e->q2e_next = c->q2h->q2h_free;
665 c->q2h->q2h_free = myoff;
666 return Q2WL_ABORT;
667 }
668 return 0;
669 }
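/*
 * Clear the limits for one (idtype, id, objtype) key by copying the
 * default limits over it.  If the entry then carries no information at
 * all, it is unlinked from its hash chain and returned to the free list.
 */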
670 int
671 lfsquota2_handle_cmd_delete(struct ulfsmount *ump, const struct quotakey *qk)
672 {
673 int idtype;
674 id_t id;
675 int objtype;
676 int error, i, canfree;
677 struct dquot *dq;
678 struct quota2_header *q2h;
679 struct quota2_entry q2e, *q2ep;
680 struct buf *hbp, *bp;
681 u_long hash_mask;
682 struct dq2clear_callback c;
683
684 idtype = qk->qk_idtype;
685 id = qk->qk_id;
686 objtype = qk->qk_objtype;
687
688 if (ump->um_quotas[idtype] == NULLVP)
689 return ENODEV;
690 if (id == QUOTA_DEFAULTID)
691 return EOPNOTSUPP;
692
693 /* get the default entry before locking the entry's buffer */
694 mutex_enter(&lfs_dqlock);
695 error = getq2h(ump, idtype, &hbp, &q2h, 0);
696 if (error) {
697 mutex_exit(&lfs_dqlock);
698 return error;
699 }
700 /* we'll copy to another disk entry, so no need to swap */
701 memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
702 mutex_exit(&lfs_dqlock);
703 brelse(hbp, 0);
704
705 error = lfs_dqget(NULLVP, id, ump, idtype, &dq);
706 if (error)
707 return error;
708
709 mutex_enter(&dq->dq_interlock);
710 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
711 /* already clear, nothing to do */
712 error = ENOENT;
713 goto out_il;
714 }
715
716 error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
717 &bp, &q2ep, B_MODIFY);
718 if (error)
719 goto out_wapbl;
720
721 /* make sure we can index by the objtype passed in */
722 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
723 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
724
725 /* clear the requested objtype by copying from the default entry */
726 q2ep->q2e_val[objtype].q2v_softlimit =
727 q2e.q2e_val[objtype].q2v_softlimit;
728 q2ep->q2e_val[objtype].q2v_hardlimit =
729 q2e.q2e_val[objtype].q2v_hardlimit;
730 q2ep->q2e_val[objtype].q2v_grace =
731 q2e.q2e_val[objtype].q2v_grace;
732 q2ep->q2e_val[objtype].q2v_time = 0;
733
734 /* if this entry now contains no information, we can free it */
735 canfree = 1;
736 for (i = 0; i < N_QL; i++) {
737 if (q2ep->q2e_val[i].q2v_cur != 0 ||
738 (q2ep->q2e_val[i].q2v_softlimit !=
739 q2e.q2e_val[i].q2v_softlimit) ||
740 (q2ep->q2e_val[i].q2v_hardlimit !=
741 q2e.q2e_val[i].q2v_hardlimit) ||
742 (q2ep->q2e_val[i].q2v_grace !=
743 q2e.q2e_val[i].q2v_grace)) {
744 canfree = 0;
745 break;
746 }
747 /* note: do not need to check q2v_time */
748 }
749
750 if (canfree == 0) {
751 quota2_bwrite(ump->um_mountp, bp);
752 goto out_wapbl;
753 }
754 /* we can free it. release bp so we can walk the list */
755 brelse(bp, 0);
756 mutex_enter(&lfs_dqlock);
757 error = getq2h(ump, idtype, &hbp, &q2h, 0);
758 if (error)
759 goto out_dqlock;
760
761 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
762 c.dq = dq;
763 c.id = id;
764 c.q2h = q2h;
765 error = quota2_walk_list(ump, hbp, idtype,
766 &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
767 dq2clear_callback);
768
769 bwrite(hbp);
770
771 out_dqlock:
772 mutex_exit(&lfs_dqlock);
773 out_wapbl:
774 out_il:
775 mutex_exit(&dq->dq_interlock);
776 lfs_dqrele(NULLVP, dq);
777 return error;
778 }
779
780 static int
781 quota2_fetch_q2e(struct ulfsmount *ump, const struct quotakey *qk,
782 struct quota2_entry *ret)
783 {
784 struct dquot *dq;
785 int error;
786 struct quota2_entry *q2ep;
787 struct buf *bp;
788 const int needswap = ULFS_MPNEEDSWAP(ump);
789
790 error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
791 if (error)
792 return error;
793
794 mutex_enter(&dq->dq_interlock);
795 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
796 mutex_exit(&dq->dq_interlock);
797 lfs_dqrele(NULLVP, dq);
798 return ENOENT;
799 }
800 error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
801 &bp, &q2ep, 0);
802 if (error) {
803 mutex_exit(&dq->dq_interlock);
804 lfs_dqrele(NULLVP, dq);
805 return error;
806 }
807 lfsquota2_ulfs_rwq2e(q2ep, ret, needswap);
808 brelse(bp, 0);
809 mutex_exit(&dq->dq_interlock);
810 lfs_dqrele(NULLVP, dq);
811
812 return 0;
813 }
814
815 static int
816 quota2_fetch_quotaval(struct ulfsmount *ump, const struct quotakey *qk,
817 struct quotaval *ret)
818 {
819 struct dquot *dq;
820 int error;
821 struct quota2_entry *q2ep, q2e;
822 struct buf *bp;
823 const int needswap = ULFS_MPNEEDSWAP(ump);
824 id_t id2;
825
826 error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
827 if (error)
828 return error;
829
830 mutex_enter(&dq->dq_interlock);
831 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
832 mutex_exit(&dq->dq_interlock);
833 lfs_dqrele(NULLVP, dq);
834 return ENOENT;
835 }
836 error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
837 &bp, &q2ep, 0);
838 if (error) {
839 mutex_exit(&dq->dq_interlock);
840 lfs_dqrele(NULLVP, dq);
841 return error;
842 }
843 lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
844 brelse(bp, 0);
845 mutex_exit(&dq->dq_interlock);
846 lfs_dqrele(NULLVP, dq);
847
848 q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
849 KASSERT(id2 == qk->qk_id);
850 return 0;
851 }
852
853 int
854 lfsquota2_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
855 struct quotaval *qv)
856 {
857 int error;
858 struct quota2_header *q2h;
859 struct quota2_entry q2e;
860 struct buf *bp;
861 const int needswap = ULFS_MPNEEDSWAP(ump);
862 id_t id2;
863
864 /*
865 * Make sure the FS-independent codes match the internal ones,
866 * so we can use the passed-in objtype without having to
867 * convert it explicitly to QL_BLOCK/QL_FILE.
868 */
869 CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
870 CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
871 CTASSERT(N_QL == 2);
872
873 if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
874 return EINVAL;
875 }
876
877 if (ump->um_quotas[qk->qk_idtype] == NULLVP)
878 return ENODEV;
879 if (qk->qk_id == QUOTA_DEFAULTID) {
880 mutex_enter(&lfs_dqlock);
881 error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
882 if (error) {
883 mutex_exit(&lfs_dqlock);
884 return error;
885 }
886 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
887 mutex_exit(&lfs_dqlock);
888 brelse(bp, 0);
889 q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
890 qk->qk_objtype, qv);
891 (void)id2;
892 } else
893 error = quota2_fetch_quotaval(ump, qk, qv);
894
895 return error;
896 }
897
898 /*
899 * Cursor structure we use.
900 *
901 * This will get stored in userland between calls, so we must assume it
902 * can come back arbitrarily corrupted.
903 */
904 struct ulfsq2_cursor {
905 uint32_t q2c_magic; /* magic number */
906 int q2c_hashsize; /* size of hash table at last go */
907
908 int q2c_users_done; /* true if we've returned all user data */
909 int q2c_groups_done; /* true if we've returned all group data */
910 int q2c_defaults_done; /* true if we've returned the default values */
911 int q2c_hashpos; /* slot to start at in hash table */
912 int q2c_uidpos; /* number of ids we've handled */
913 int q2c_blocks_done; /* true if we've returned the blocks value */
914 };
915
916 /*
917 * State of a single cursorget call, or at least the part of it that
918 * needs to be passed around.
919 */
920 struct q2cursor_state {
921 /* data return pointers */
922 struct quotakey *keys;
923 struct quotaval *vals;
924
925 /* key/value counters */
926 unsigned maxkeyvals;
927 unsigned numkeys; /* number of keys assigned */
928
929 /* ID to key/value conversion state */
930 int skipfirst; /* if true skip first key/value */
931 int skiplast; /* if true skip last key/value */
932
933 /* ID counters */
934 unsigned maxids; /* maximum number of IDs to handle */
935 unsigned numids; /* number of IDs handled */
936 };
937
938 /*
939 * Additional structure for getids callback.
940 */
941 struct q2cursor_getids {
942 struct q2cursor_state *state;
943 int idtype;
944 unsigned skip; /* number of ids to skip over */
945 unsigned new_skip; /* number of ids to skip over next time */
946 unsigned skipped; /* number skipped so far */
947 int stopped; /* true if we stopped quota2_walk_list early */
948 };
949
950 /*
951 * Cursor-related functions
952 */
953
954 /* magic number */
955 #define Q2C_MAGIC (0xbeebe111)
956
957 /* extract cursor from caller form */
958 #define Q2CURSOR(qkc) ((struct ulfsq2_cursor *)&qkc->u.qkc_space[0])
959
960 /*
961 * Check that a cursor we're handed is something like valid. If
962 * someone munges it and it still passes these checks, they'll get
963 * partial or odd results back but won't break anything.
964 */
965 static int
966 q2cursor_check(struct ulfsq2_cursor *cursor)
967 {
968 if (cursor->q2c_magic != Q2C_MAGIC) {
969 return EINVAL;
970 }
971 if (cursor->q2c_hashsize < 0) {
972 return EINVAL;
973 }
974
975 if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
976 return EINVAL;
977 }
978 if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
979 return EINVAL;
980 }
981 if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
982 return EINVAL;
983 }
984 if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
985 return EINVAL;
986 }
987 if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
988 return EINVAL;
989 }
990 return 0;
991 }
992
993 /*
994 * Set up the q2cursor state.
995 */
996 static void
997 q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
998 struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
999 {
1000 state->keys = keys;
1001 state->vals = vals;
1002
1003 state->maxkeyvals = maxkeyvals;
1004 state->numkeys = 0;
1005
1006 /*
1007 * For each ID there are two quotavals to return. If the
1008 * maximum number of entries to return is odd, we might want
1009 * to skip the first quotaval of the first ID, or the last
1010 * quotaval of the last ID, but not both. So the number of IDs
1011 * we want is (up to) half the number of return slots we have,
1012 * rounded up.
1013 */
1014
1015 state->maxids = (state->maxkeyvals + 1) / 2;
1016 state->numids = 0;
1017 if (state->maxkeyvals % 2) {
1018 if (blocks_done) {
1019 state->skipfirst = 1;
1020 state->skiplast = 0;
1021 } else {
1022 state->skipfirst = 0;
1023 state->skiplast = 1;
1024 }
1025 } else {
1026 state->skipfirst = 0;
1027 state->skiplast = 0;
1028 }
1029 }
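/*
 * Worked example (illustrative numbers only): with maxkeyvals == 5 there
 * is room for (5 + 1) / 2 == 3 ids.  If the previous call already
 * returned the BLOCKS half of the last id (blocks_done), the first id
 * here contributes only its FILES key, so skipfirst is set; otherwise the
 * third id only has room for its BLOCKS key and skiplast is set.
 */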
1030
1031 /*
1032 * Choose which idtype we're going to work on. If doing a full
1033 * iteration, we do users first, then groups, but either might be
1034 * disabled or marked to skip via cursorsetidtype(), so don't make
1035 * silly assumptions.
1036 */
1037 static int
1038 q2cursor_pickidtype(struct ulfsq2_cursor *cursor, int *idtype_ret)
1039 {
1040 if (cursor->q2c_users_done == 0) {
1041 *idtype_ret = QUOTA_IDTYPE_USER;
1042 } else if (cursor->q2c_groups_done == 0) {
1043 *idtype_ret = QUOTA_IDTYPE_GROUP;
1044 } else {
1045 return EAGAIN;
1046 }
1047 return 0;
1048 }
1049
1050 /*
1051 * Add an ID to the current state. Sets up either one or two keys to
1052 * refer to it, depending on whether it's first/last and the setting
1053 * of skipfirst. (skiplast does not need to be explicitly tested)
1054 */
1055 static void
1056 q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
1057 {
1058 KASSERT(state->numids < state->maxids);
1059 KASSERT(state->numkeys < state->maxkeyvals);
1060
1061 if (!state->skipfirst || state->numkeys > 0) {
1062 state->keys[state->numkeys].qk_idtype = idtype;
1063 state->keys[state->numkeys].qk_id = id;
1064 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
1065 state->numkeys++;
1066 }
1067 if (state->numkeys < state->maxkeyvals) {
1068 state->keys[state->numkeys].qk_idtype = idtype;
1069 state->keys[state->numkeys].qk_id = id;
1070 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
1071 state->numkeys++;
1072 } else {
1073 KASSERT(state->skiplast);
1074 }
1075 state->numids++;
1076 }
1077
1078 /*
1079 * Callback function for getting IDs. Update counting and call addid.
1080 */
1081 static int
1082 q2cursor_getids_callback(struct ulfsmount *ump, uint64_t *offp,
1083 struct quota2_entry *q2ep, uint64_t off, void *v)
1084 {
1085 struct q2cursor_getids *gi = v;
1086 id_t id;
1087 #ifdef LFS_EI
1088 const int needswap = ULFS_MPNEEDSWAP(ump);
1089 #endif
1090
1091 if (gi->skipped < gi->skip) {
1092 gi->skipped++;
1093 return 0;
1094 }
1095 id = ulfs_rw32(q2ep->q2e_uid, needswap);
1096 q2cursor_addid(gi->state, gi->idtype, id);
1097 gi->new_skip++;
1098 if (gi->state->numids >= gi->state->maxids) {
1099 /* got enough ids, stop now */
1100 gi->stopped = 1;
1101 return Q2WL_ABORT;
1102 }
1103 return 0;
1104 }
1105
1106 /*
1107 * Fill in a batch of quotakeys by scanning one or more hash chains.
1108 */
1109 static int
1110 q2cursor_getkeys(struct ulfsmount *ump, int idtype, struct ulfsq2_cursor *cursor,
1111 struct q2cursor_state *state,
1112 int *hashsize_ret, struct quota2_entry *default_q2e_ret)
1113 {
1114 const int needswap = ULFS_MPNEEDSWAP(ump);
1115 struct buf *hbp;
1116 struct quota2_header *q2h;
1117 int quota2_hash_size;
1118 struct q2cursor_getids gi;
1119 uint64_t offset;
1120 int error;
1121
1122 /*
1123 * Read the header block.
1124 */
1125
1126 mutex_enter(&lfs_dqlock);
1127 error = getq2h(ump, idtype, &hbp, &q2h, 0);
1128 if (error) {
1129 mutex_exit(&lfs_dqlock);
1130 return error;
1131 }
1132
1133 /* if the table size has changed, make the caller start over */
1134 quota2_hash_size = ulfs_rw16(q2h->q2h_hash_size, needswap);
1135 if (cursor->q2c_hashsize == 0) {
1136 cursor->q2c_hashsize = quota2_hash_size;
1137 } else if (cursor->q2c_hashsize != quota2_hash_size) {
1138 error = EDEADLK;
1139 goto scanfail;
1140 }
1141
1142 /* grab the entry with the default values out of the header */
1143 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);
1144
1145 /* If we haven't done the defaults yet, that goes first. */
1146 if (cursor->q2c_defaults_done == 0) {
1147 q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
1148 /* if we read both halves, mark it done */
1149 if (state->numids < state->maxids || !state->skiplast) {
1150 cursor->q2c_defaults_done = 1;
1151 }
1152 }
1153
1154 gi.state = state;
1155 gi.idtype = idtype;
1156
1157 while (state->numids < state->maxids) {
1158 if (cursor->q2c_hashpos >= quota2_hash_size) {
1159 /* nothing more left */
1160 break;
1161 }
1162
1163 /* scan this hash chain */
1164 gi.skip = cursor->q2c_uidpos;
1165 gi.new_skip = gi.skip;
1166 gi.skipped = 0;
1167 gi.stopped = 0;
1168 offset = q2h->q2h_entries[cursor->q2c_hashpos];
1169
1170 error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
1171 q2cursor_getids_callback);
1172 KASSERT(error != Q2WL_ABORT);
1173 if (error) {
1174 break;
1175 }
1176 if (gi.stopped) {
1177 /* callback stopped before reading whole chain */
1178 cursor->q2c_uidpos = gi.new_skip;
1179 /* if we didn't get both halves, back up */
1180 if (state->numids == state->maxids && state->skiplast){
1181 KASSERT(cursor->q2c_uidpos > 0);
1182 cursor->q2c_uidpos--;
1183 }
1184 } else {
1185 /* read whole chain */
1186 /* if we got both halves of the last id, advance */
1187 if (state->numids < state->maxids || !state->skiplast){
1188 cursor->q2c_uidpos = 0;
1189 cursor->q2c_hashpos++;
1190 }
1191 }
1192 }
1193
1194 scanfail:
1195 mutex_exit(&lfs_dqlock);
1196 brelse(hbp, 0);
1197 if (error)
1198 return error;
1199
1200 *hashsize_ret = quota2_hash_size;
1201 return 0;
1202 }
1203
1204 /*
1205 * Fetch the quotavals for the quotakeys.
1206 */
1207 static int
1208 q2cursor_getvals(struct ulfsmount *ump, struct q2cursor_state *state,
1209 const struct quota2_entry *default_q2e)
1210 {
1211 int hasid;
1212 id_t loadedid, id;
1213 unsigned pos;
1214 struct quota2_entry q2e;
1215 int objtype;
1216 int error;
1217
1218 hasid = 0;
1219 loadedid = 0;
1220 for (pos = 0; pos < state->numkeys; pos++) {
1221 id = state->keys[pos].qk_id;
1222 if (!hasid || id != loadedid) {
1223 hasid = 1;
1224 loadedid = id;
1225 if (id == QUOTA_DEFAULTID) {
1226 q2e = *default_q2e;
1227 } else {
1228 error = quota2_fetch_q2e(ump,
1229 &state->keys[pos],
1230 &q2e);
1231 if (error == ENOENT) {
1232 /* something changed - start over */
1233 error = EDEADLK;
1234 }
1235 if (error) {
1236 return error;
1237 }
1238 }
1239 }
1240
1241
1242 objtype = state->keys[pos].qk_objtype;
1243 KASSERT(objtype >= 0 && objtype < N_QL);
1244 q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
1245 }
1246
1247 return 0;
1248 }
1249
1250 /*
1251 * Handle cursorget.
1252 *
1253 * We can't just read keys and values directly, because we can't walk
1254 * the list with dqlock and grab dq_interlock to read the entries at
1255 * the same time. So we're going to do two passes: one to figure out
1256 * which IDs we want and fill in the keys, and then a second to use
1257 * the keys to fetch the values.
1258 */
1259 int
1260 lfsquota2_handle_cmd_cursorget(struct ulfsmount *ump, struct quotakcursor *qkc,
1261 struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
1262 unsigned *ret)
1263 {
1264 int error;
1265 struct ulfsq2_cursor *cursor;
1266 struct ulfsq2_cursor newcursor;
1267 struct q2cursor_state state;
1268 struct quota2_entry default_q2e;
1269 int idtype;
1270 int quota2_hash_size;
1271
1272 /*
1273 * Convert and validate the cursor.
1274 */
1275 cursor = Q2CURSOR(qkc);
1276 error = q2cursor_check(cursor);
1277 if (error) {
1278 return error;
1279 }
1280
1281 /*
1282 * Make sure our on-disk codes match the values of the
1283 * FS-independent ones. This avoids the need for explicit
1284 * conversion (which would be a NOP anyway and thus easily
1285 * left out or called in the wrong places...)
1286 */
1287 CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
1288 CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
1289 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
1290 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
1291
1292 /*
1293 * If some of the idtypes aren't configured/enabled, arrange
1294 * to skip over them.
1295 */
1296 if (cursor->q2c_users_done == 0 &&
1297 ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1298 cursor->q2c_users_done = 1;
1299 }
1300 if (cursor->q2c_groups_done == 0 &&
1301 ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1302 cursor->q2c_groups_done = 1;
1303 }
1304
1305 /* Loop over, potentially, both idtypes */
1306 while (1) {
1307
1308 /* Choose id type */
1309 error = q2cursor_pickidtype(cursor, &idtype);
1310 if (error == EAGAIN) {
1311 /* nothing more to do, return 0 */
1312 *ret = 0;
1313 return 0;
1314 }
1315 KASSERT(ump->um_quotas[idtype] != NULLVP);
1316
1317 /*
1318 * Initialize the per-call iteration state. Copy the
1319 * cursor state so we can update it in place but back
1320 * out on error.
1321 */
1322 q2cursor_initstate(&state, keys, vals, maxreturn,
1323 cursor->q2c_blocks_done);
1324 newcursor = *cursor;
1325
1326 /* Assign keys */
1327 error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
1328 &quota2_hash_size, &default_q2e);
1329 if (error) {
1330 return error;
1331 }
1332
1333 /* Now fill in the values. */
1334 error = q2cursor_getvals(ump, &state, &default_q2e);
1335 if (error) {
1336 return error;
1337 }
1338
1339 /*
1340 * Now that we aren't going to fail and lose what we
1341 * did so far, we can update the cursor state.
1342 */
1343
1344 if (newcursor.q2c_hashpos >= quota2_hash_size) {
1345 if (idtype == QUOTA_IDTYPE_USER)
1346 cursor->q2c_users_done = 1;
1347 else
1348 cursor->q2c_groups_done = 1;
1349
1350 /* start over on another id type */
1351 cursor->q2c_hashsize = 0;
1352 cursor->q2c_defaults_done = 0;
1353 cursor->q2c_hashpos = 0;
1354 cursor->q2c_uidpos = 0;
1355 cursor->q2c_blocks_done = 0;
1356 } else {
1357 *cursor = newcursor;
1358 cursor->q2c_blocks_done = state.skiplast;
1359 }
1360
1361 /*
1362 * If we have something to return, return it.
1363 * Otherwise, continue to the other idtype, if any,
1364 * and only return zero at end of iteration.
1365 */
1366 if (state.numkeys > 0) {
1367 break;
1368 }
1369 }
1370
1371 *ret = state.numkeys;
1372 return 0;
1373 }
1374
1375 int
1376 lfsquota2_handle_cmd_cursoropen(struct ulfsmount *ump, struct quotakcursor *qkc)
1377 {
1378 struct ulfsq2_cursor *cursor;
1379
1380 CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
1381 cursor = Q2CURSOR(qkc);
1382
1383 cursor->q2c_magic = Q2C_MAGIC;
1384 cursor->q2c_hashsize = 0;
1385
1386 cursor->q2c_users_done = 0;
1387 cursor->q2c_groups_done = 0;
1388 cursor->q2c_defaults_done = 0;
1389 cursor->q2c_hashpos = 0;
1390 cursor->q2c_uidpos = 0;
1391 cursor->q2c_blocks_done = 0;
1392 return 0;
1393 }
1394
1395 int
1396 lfsquota2_handle_cmd_cursorclose(struct ulfsmount *ump, struct quotakcursor *qkc)
1397 {
1398 struct ulfsq2_cursor *cursor;
1399 int error;
1400
1401 cursor = Q2CURSOR(qkc);
1402 error = q2cursor_check(cursor);
1403 if (error) {
1404 return error;
1405 }
1406
1407 /* nothing to do */
1408
1409 return 0;
1410 }
1411
1412 int
1413 lfsquota2_handle_cmd_cursorskipidtype(struct ulfsmount *ump,
1414 struct quotakcursor *qkc, int idtype)
1415 {
1416 struct ulfsq2_cursor *cursor;
1417 int error;
1418
1419 cursor = Q2CURSOR(qkc);
1420 error = q2cursor_check(cursor);
1421 if (error) {
1422 return error;
1423 }
1424
1425 switch (idtype) {
1426 case QUOTA_IDTYPE_USER:
1427 cursor->q2c_users_done = 1;
1428 break;
1429 case QUOTA_IDTYPE_GROUP:
1430 cursor->q2c_groups_done = 1;
1431 break;
1432 default:
1433 return EINVAL;
1434 }
1435
1436 return 0;
1437 }
1438
1439 int
1440 lfsquota2_handle_cmd_cursoratend(struct ulfsmount *ump, struct quotakcursor *qkc,
1441 int *ret)
1442 {
1443 struct ulfsq2_cursor *cursor;
1444 int error;
1445
1446 cursor = Q2CURSOR(qkc);
1447 error = q2cursor_check(cursor);
1448 if (error) {
1449 return error;
1450 }
1451
1452 *ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
1453 return 0;
1454 }
1455
1456 int
1457 lfsquota2_handle_cmd_cursorrewind(struct ulfsmount *ump, struct quotakcursor *qkc)
1458 {
1459 struct ulfsq2_cursor *cursor;
1460 int error;
1461
1462 cursor = Q2CURSOR(qkc);
1463 error = q2cursor_check(cursor);
1464 if (error) {
1465 return error;
1466 }
1467
1468 cursor->q2c_hashsize = 0;
1469
1470 cursor->q2c_users_done = 0;
1471 cursor->q2c_groups_done = 0;
1472 cursor->q2c_defaults_done = 0;
1473 cursor->q2c_hashpos = 0;
1474 cursor->q2c_uidpos = 0;
1475 cursor->q2c_blocks_done = 0;
1476
1477 return 0;
1478 }
1479
1480 int
1481 lfs_q2sync(struct mount *mp)
1482 {
1483 return 0;
1484 }
1485
1486 struct dq2get_callback {
1487 uid_t id;
1488 struct dquot *dq;
1489 };
1490
1491 static int
1492 dq2get_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
1493 uint64_t off, void *v)
1494 {
1495 struct dq2get_callback *c = v;
1496 daddr_t lblkno;
1497 int blkoff;
1498 #ifdef LFS_EI
1499 const int needswap = ULFS_MPNEEDSWAP(ump);
1500 #endif
1501
1502 if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
1503 KASSERT(mutex_owned(&c->dq->dq_interlock));
1504 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
1505 blkoff = (off & ump->umq2_bmask);
1506 c->dq->dq2_lblkno = lblkno;
1507 c->dq->dq2_blkoff = blkoff;
1508 return Q2WL_ABORT;
1509 }
1510 return 0;
1511 }
1512
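/*
 * Find the on-disk entry for "id" by walking its hash chain and remember
 * its location in dq2_lblkno/dq2_blkoff.  Not finding an entry is not an
 * error; the dquot is then left with no on-disk entry.
 */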
1513 int
1514 lfs_dq2get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
1515 struct dquot *dq)
1516 {
1517 struct buf *bp;
1518 struct quota2_header *q2h;
1519 int error;
1520 daddr_t offset;
1521 u_long hash_mask;
1522 struct dq2get_callback c = {
1523 .id = id,
1524 .dq = dq
1525 };
1526
1527 KASSERT(mutex_owned(&dq->dq_interlock));
1528 mutex_enter(&lfs_dqlock);
1529 error = getq2h(ump, type, &bp, &q2h, 0);
1530 if (error)
1531 goto out_mutex;
1532 /* look for our entry */
1533 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
1534 offset = q2h->q2h_entries[id & hash_mask];
1535 error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
1536 dq2get_callback);
1537 brelse(bp, 0);
1538 out_mutex:
1539 mutex_exit(&lfs_dqlock);
1540 return error;
1541 }
1542
1543 int
1544 lfs_dq2sync(struct vnode *vp, struct dquot *dq)
1545 {
1546 return 0;
1547 }
1548