/* $NetBSD: ulfs_quota2.c,v 1.32 2020/01/17 20:08:10 ad Exp $ */
/* from NetBSD: ufs_quota2.c,v 1.40 2015/03/28 19:24:05 maxv Exp */
/* from NetBSD: ffs_quota2.c,v 1.5 2015/02/22 14:12:48 maxv Exp */
4
5 /*-
6 * Copyright (c) 2010 Manuel Bouyer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.32 2020/01/17 20:08:10 ad Exp $");
33
34 #include <sys/buf.h>
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/namei.h>
39 #include <sys/file.h>
40 #include <sys/proc.h>
41 #include <sys/vnode.h>
42 #include <sys/mount.h>
43 #include <sys/kauth.h>
44 #include <sys/quota.h>
45 #include <sys/quotactl.h>
46 #include <sys/timevar.h>
47
48 #include <ufs/lfs/lfs.h>
49 #include <ufs/lfs/lfs_accessors.h>
50 #include <ufs/lfs/lfs_extern.h>
51
52 #include <ufs/lfs/ulfs_quota2.h>
53 #include <ufs/lfs/ulfs_inode.h>
54 #include <ufs/lfs/ulfsmount.h>
55 #include <ufs/lfs/ulfs_bswap.h>
56 #include <ufs/lfs/ulfs_extern.h>
57 #include <ufs/lfs/ulfs_quota.h>
58
/*
 * LOCKING:
 * Data in the entries are protected by the associated struct dquot's
 * dq_interlock (this means we can't read or change a quota entry without
 * grabbing a dquot for it).
 * The header and lists (including pointers in the data entries, and q2e_uid)
 * are protected by the global dqlock.
 * The locking order is dq_interlock -> dqlock.
 */
68
/* Forward declarations of file-local helpers. */
static int quota2_bwrite(struct mount *, struct buf *);
static int getinoquota2(struct inode *, bool, bool, struct buf **,
    struct quota2_entry **);
static int getq2h(struct ulfsmount *, int, struct buf **,
    struct quota2_header **, int);
static int getq2e(struct ulfsmount *, int, daddr_t, int, struct buf **,
    struct quota2_entry **, int);
static int quota2_walk_list(struct ulfsmount *, struct buf *, int,
    uint64_t *, int, void *,
    int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *,
      uint64_t, void *));

/* Printable names for the limit types, indexed by QL_BLOCK/QL_FILE. */
static const char *limnames[] = INITQLNAMES;
82
83 static void
84 quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
85 struct quota2_entry *q2e)
86 {
87 /* make sure we can index q2e_val[] by the fs-independent objtype */
88 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
89 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
90
91 q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
92 q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
93 q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
94 }
95
96 /*
97 * Convert internal representation to FS-independent representation.
98 * (Note that while the two types are currently identical, the
99 * internal representation is an on-disk struct and the FS-independent
100 * representation is not, and they might diverge in the future.)
101 */
102 static void
103 q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
104 {
105 qv->qv_softlimit = q2v->q2v_softlimit;
106 qv->qv_hardlimit = q2v->q2v_hardlimit;
107 qv->qv_usage = q2v->q2v_cur;
108 qv->qv_expiretime = q2v->q2v_time;
109 qv->qv_grace = q2v->q2v_grace;
110 }
111
112 /*
113 * Convert a quota2entry and default-flag to the FS-independent
114 * representation.
115 */
116 static void
117 q2e_to_quotaval(struct quota2_entry *q2e, int def,
118 id_t *id, int objtype, struct quotaval *ret)
119 {
120 if (def) {
121 *id = QUOTA_DEFAULTID;
122 } else {
123 *id = q2e->q2e_uid;
124 }
125
126 KASSERT(objtype >= 0 && objtype < N_QL);
127 q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
128 }
129
130
131 static int
132 quota2_bwrite(struct mount *mp, struct buf *bp)
133 {
134 if (mp->mnt_flag & MNT_SYNCHRONOUS)
135 return bwrite(bp);
136 else {
137 bdwrite(bp);
138 return 0;
139 }
140 }
141
/*
 * Read the quota2 header block for the given quota type and return the
 * buffer plus a pointer to the header inside it.  Panics if the quota
 * file is truncated or the header magic/type mismatch (on-disk
 * corruption).  Caller must hold lfs_dqlock and must release the
 * returned buffer (brelse/bwrite).
 */
static int
getq2h(struct ulfsmount *ump, int type,
    struct buf **bpp, struct quota2_header **q2hp, int flags)
{
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	int error;
	struct buf *bp;
	struct quota2_header *q2h;

	KASSERT(mutex_owned(&lfs_dqlock));
	/* the header lives in logical block 0 of the quota file */
	error = bread(ump->um_quotas[type], 0, ump->umq2_bsize, flags, &bp);
	if (error)
		return error;
	if (bp->b_resid != 0)
		panic("dq2get: %s quota file truncated", lfs_quotatypes[type]);

	q2h = (void *)bp->b_data;
	/* magic is stored in fs byte order; q2h_type is a single byte */
	if (ulfs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
	    q2h->q2h_type != type)
		panic("dq2get: corrupted %s quota header", lfs_quotatypes[type]);
	*bpp = bp;
	*q2hp = q2h;
	return 0;
}
167
/*
 * Read the quota-file block containing the entry at (lblkno, blkoffset)
 * and return the buffer plus a pointer to the entry inside it.  Panics
 * on a misaligned offset or a truncated read (on-disk corruption).
 * Caller releases the buffer (brelse/bwrite).
 */
static int
getq2e(struct ulfsmount *ump, int type, daddr_t lblkno, int blkoffset,
    struct buf **bpp, struct quota2_entry **q2ep, int flags)
{
	int error;
	struct buf *bp;

	/* entries are always 64-bit aligned within the block */
	if (blkoffset & (sizeof(uint64_t) - 1)) {
		panic("dq2get: %s quota file corrupted",
		    lfs_quotatypes[type]);
	}
	error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize, flags, &bp);
	if (error)
		return error;
	if (bp->b_resid != 0) {
		panic("dq2get: %s quota file corrupted",
		    lfs_quotatypes[type]);
	}
	*q2ep = (void *)((char *)bp->b_data + blkoffset);
	*bpp = bp;
	return 0;
}
190
/* walk a quota entry list, calling the callback for each entry */
#define Q2WL_ABORT 0x10000000

/*
 * Walk the singly-linked on-disk entry list starting at *offp (a file
 * offset in fs byte order; 0 terminates the list), calling func on each
 * entry.  The callback may unlink the current entry by rewriting *offp
 * (the parent's next pointer), in which case we re-read the pointer and
 * continue from there, or abort the walk by returning Q2WL_ABORT.
 * Buffers are read with 'flags'; if B_MODIFY is set they are written
 * back when released.  hbp (the header block, never released here) must
 * be supplied by the caller; lfs_dqlock must be held.
 * Returns 0 on success/abort, or the first error from the callback or
 * from buffer I/O.
 */
static int
quota2_walk_list(struct ulfsmount *ump, struct buf *hbp, int type,
    uint64_t *offp, int flags, void *a,
    int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
{
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	daddr_t off = ulfs_rw64(*offp, needswap);
	struct buf *bp, *obp = hbp;
	int ret = 0, ret2 = 0;
	struct quota2_entry *q2e;
	daddr_t lblkno, blkoff, olblkno = 0;

	KASSERT(mutex_owned(&lfs_dqlock));

	while (off != 0) {
		/* split the file offset into logical block + in-block offset */
		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
		blkoff = (off & ump->umq2_bmask);
		if (lblkno == 0) {
			/* in the header block */
			bp = hbp;
		} else if (lblkno == olblkno) {
			/* still in the same buf */
			bp = obp;
		} else {
			ret = bread(ump->um_quotas[type], lblkno,
			    ump->umq2_bsize, flags, &bp);
			if (ret)
				return ret;
			if (bp->b_resid != 0) {
				panic("quota2_walk_list: %s quota file corrupted",
				    lfs_quotatypes[type]);
			}
		}
		q2e = (void *)((char *)(bp->b_data) + blkoff);
		ret = (*func)(ump, offp, q2e, off, a);
		if (off != ulfs_rw64(*offp, needswap)) {
			/* callback changed parent's pointer, redo */
			off = ulfs_rw64(*offp, needswap);
			if (bp != hbp && bp != obp)
				ret2 = bwrite(bp);
		} else {
			/* parent is now the current entry; advance */
			if (obp != bp && obp != hbp) {
				/* done with the previous buffer */
				if (flags & B_MODIFY)
					ret2 = bwrite(obp);
				else
					brelse(obp, 0);
			}
			obp = bp;
			olblkno = lblkno;
			offp = &(q2e->q2e_next);
			off = ulfs_rw64(*offp, needswap);
		}
		if (ret)
			break;
		if (ret2) {
			ret = ret2;
			break;
		}
	}
	/* release the last non-header buffer, if any */
	if (obp != hbp) {
		if (flags & B_MODIFY)
			ret2 = bwrite(obp);
		else
			brelse(obp, 0);
	}
	/* Q2WL_ABORT is a deliberate early stop, not an error */
	if (ret & Q2WL_ABORT)
		return 0;
	if (ret == 0)
		return ret2;
	return ret;
}
267
268 int
269 lfsquota2_umount(struct mount *mp, int flags)
270 {
271 int i, error;
272 struct ulfsmount *ump = VFSTOULFS(mp);
273 struct lfs *fs = ump->um_lfs;
274
275 if ((fs->um_flags & ULFS_QUOTA2) == 0)
276 return 0;
277
278 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
279 if (ump->um_quotas[i] != NULLVP) {
280 error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
281 ump->um_cred[i]);
282 if (error) {
283 printf("quota2_umount failed: close(%p) %d\n",
284 ump->um_quotas[i], error);
285 return error;
286 }
287 }
288 ump->um_quotas[i] = NULLVP;
289 }
290 return 0;
291 }
292
/*
 * Allocate an on-disk quota entry for (type, uid): take one from the
 * free list (growing the quota file by a block first if the free list
 * is empty), initialize it from the default entry, and insert it into
 * the header's hash chain.  On success the entry's location is recorded
 * in dq->dq2_lblkno / dq->dq2_blkoff.
 * Caller must hold both dq->dq_interlock and lfs_dqlock.
 */
static int
quota2_q2ealloc(struct ulfsmount *ump, int type, uid_t uid, struct dquot *dq)
{
	int error, error2;
	struct buf *hbp, *bp;
	struct quota2_header *q2h;
	struct quota2_entry *q2e;
	daddr_t offset;
	u_long hash_mask;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	KASSERT(mutex_owned(&dq->dq_interlock));
	KASSERT(mutex_owned(&lfs_dqlock));
	error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
	if (error)
		return error;
	offset = ulfs_rw64(q2h->q2h_free, needswap);
	if (offset == 0) {
		/* free list empty: append one block to the quota file */
		struct vnode *vp = ump->um_quotas[type];
		struct inode *ip = VTOI(vp);
		uint64_t size = ip->i_size;
		/* need to allocate a new disk block */
		error = lfs_balloc(vp, size, ump->umq2_bsize,
		    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
		if (error) {
			brelse(hbp, 0);
			return error;
		}
		KASSERT((ip->i_size % ump->umq2_bsize) == 0);
		ip->i_size += ump->umq2_bsize;
		DIP_ASSIGN(ip, size, ip->i_size);
		ip->i_state |= IN_CHANGE | IN_UPDATE;
		uvm_vnp_setsize(vp, ip->i_size);
		/* carve the fresh block into free entries on q2h_free */
		lfsquota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
		    needswap);
		error = bwrite(bp);
		error2 = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
		if (error || error2) {
			brelse(hbp, 0);
			if (error)
				return error;
			return error2;
		}
		offset = ulfs_rw64(q2h->q2h_free, needswap);
		KASSERT(offset != 0);
	}
	/* remember where this id's entry lives for later lookups */
	dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
	dq->dq2_blkoff = (offset & ump->umq2_bmask);
	if (dq->dq2_lblkno == 0) {
		/* the entry is inside the header block we already hold */
		bp = hbp;
		q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
	} else {
		error = getq2e(ump, type, dq->dq2_lblkno,
		    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
		if (error) {
			brelse(hbp, 0);
			return error;
		}
	}
	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	/* remove from free list */
	q2h->q2h_free = q2e->q2e_next;

	/* initialize from the default entry, then set the owner id */
	memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
	q2e->q2e_uid = ulfs_rw32(uid, needswap);
	/* insert in hash list */
	q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
	q2h->q2h_entries[uid & hash_mask] = ulfs_rw64(offset, needswap);
	if (hbp != bp) {
		bwrite(hbp);
	}
	bwrite(bp);
	return 0;
}
368
/*
 * Look up (and, if 'alloc', create) the on-disk quota entries for every
 * quota type that applies to ip.  On success each bpp[i]/q2ep[i] is
 * either NULL (no quota of that type, or no entry and !alloc) or points
 * at the entry, with the corresponding dquot's dq_interlock held by the
 * caller until it releases the buffers.
 * NOTE(review): on an error return the dq_interlocks taken below remain
 * held — confirm callers rely on this before restructuring.
 */
static int
getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
    struct quota2_entry **q2ep)
{
	int error;
	int i;
	struct dquot *dq;
	struct ulfsmount *ump = ip->i_ump;
	u_int32_t ino_ids[ULFS_MAXQUOTAS];

	error = lfs_getinoquota(ip);
	if (error)
		return error;

	ino_ids[ULFS_USRQUOTA] = ip->i_uid;
	ino_ids[ULFS_GRPQUOTA] = ip->i_gid;
	/* first get the interlock for all dquot */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		mutex_enter(&dq->dq_interlock);
	}
	/* now get the corresponding quota entry */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		bpp[i] = NULL;
		q2ep[i] = NULL;
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		if (__predict_false(ump->um_quotas[i] == NULL)) {
			/*
			 * quotas have been turned off. This can happen
			 * at umount time.
			 */
			mutex_exit(&dq->dq_interlock);
			lfs_dqrele(NULLVP, dq);
			ip->i_dquot[i] = NULL;
			continue;
		}

		/* (lblkno,blkoff) == (0,0) means "no on-disk entry yet" */
		if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
			if (!alloc) {
				continue;
			}
			/* need to alloc a new on-disk quot */
			mutex_enter(&lfs_dqlock);
			error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
			mutex_exit(&lfs_dqlock);
			if (error)
				return error;
		}
		KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
		error = getq2e(ump, i, dq->dq2_lblkno,
		    dq->dq2_blkoff, &bpp[i], &q2ep[i],
		    modify ? B_MODIFY : 0);
		if (error)
			return error;
	}
	return 0;
}
430
431 __inline static int __unused
432 lfsquota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
433 {
434 return lfsquota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
435 q2v->q2v_hardlimit, q2v->q2v_time, now);
436 }
437
/*
 * Account a block (QL_BLOCK) or inode (QL_FILE) usage change of 'change'
 * units against all quota types that apply to ip.
 *  change == 0: nothing to do, just release buffers/locks.
 *  change <  0: release usage; always allowed, clamped at zero.
 *  change >  0: allocation; checked against soft/hard limits unless the
 *               caller passes FORCE or holds the NOLIMIT kauth privilege.
 * Returns 0 or EDQUOT; on EDQUOT no usage is recorded.
 */
static int
quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
    int flags)
{
	int error;
	struct buf *bp[ULFS_MAXQUOTAS];
	struct quota2_entry *q2e[ULFS_MAXQUOTAS];
	struct quota2_val *q2vp;
	struct dquot *dq;
	uint64_t ncurblks;
	struct ulfsmount *ump = ip->i_ump;
	struct lfs *fs = ip->i_lfs;
	struct mount *mp = ump->um_mountp;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	int i;

	/* only allocate an on-disk entry when usage is increasing */
	if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
		return error;
	if (change == 0) {
		/* nothing charged; release everything and return */
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			dq = ip->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			if (bp[i])
				brelse(bp[i], 0);
			mutex_exit(&dq->dq_interlock);
		}
		return 0;
	}
	if (change < 0) {
		/* freeing resources is always permitted */
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			dq = ip->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			if (q2e[i] == NULL) {
				mutex_exit(&dq->dq_interlock);
				continue;
			}
			q2vp = &q2e[i]->q2e_val[vtype];
			ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
			/* clamp at zero rather than underflowing */
			if (ncurblks < -change)
				ncurblks = 0;
			else
				ncurblks += change;
			q2vp->q2v_cur = ulfs_rw64(ncurblks, needswap);
			quota2_bwrite(mp, bp[i]);
			mutex_exit(&dq->dq_interlock);
		}
		return 0;
	}
	/* see if the allocation is allowed */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		struct quota2_val q2v;
		int ql_stat;
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		KASSERT(q2e[i] != NULL);
		/* work on a host-byte-order copy of the values */
		lfsquota2_ulfs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
		ql_stat = lfsquota2_check_limit(&q2v, change, time_second);

		if ((flags & FORCE) == 0 &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
		    KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
			/* enforce this limit */
			switch(QL_STATUS(ql_stat)) {
			case QL_S_DENY_HARD:
				/* warn once per dquot until DQ_WARN clears */
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: write failed, %s %s "
					    "limit reached\n",
					    mp->mnt_stat.f_mntonname,
					    lfs_quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				error = EDQUOT;
				break;
			case QL_S_DENY_GRACE:
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: write failed, %s %s "
					    "limit reached\n",
					    mp->mnt_stat.f_mntonname,
					    lfs_quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				error = EDQUOT;
				break;
			case QL_S_ALLOW_SOFT:
				/* over soft limit: allow but warn */
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: warning, %s %s "
					    "quota exceeded\n",
					    mp->mnt_stat.f_mntonname,
					    lfs_quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				break;
			}
		}
		/*
		 * always do this; we don't know if the allocation will
		 * succeed or not in the end. if we don't do the allocation
		 * q2v_time will be ignored anyway
		 */
		if (ql_stat & QL_F_CROSS) {
			/* just crossed the soft limit: start the grace clock */
			q2v.q2v_time = time_second + q2v.q2v_grace;
			lfsquota2_ulfs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
			    needswap);
		}
	}

	/* now do the allocation if allowed */
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		KASSERT(q2e[i] != NULL);
		if (error == 0) {
			q2vp = &q2e[i]->q2e_val[vtype];
			ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
			q2vp->q2v_cur = ulfs_rw64(ncurblks + change, needswap);
			quota2_bwrite(mp, bp[i]);
		} else
			brelse(bp[i], 0);
		mutex_exit(&dq->dq_interlock);
	}
	return error;
}
565
566 int
567 lfs_chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
568 {
569 return quota2_check(ip, QL_BLOCK, change, cred, flags);
570 }
571
572 int
573 lfs_chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
574 {
575 return quota2_check(ip, QL_FILE, change, cred, flags);
576 }
577
/*
 * quotactl PUT: set the limits for (idtype, id, objtype) from 'val'.
 * QUOTA_DEFAULTID updates the default entry stored in the header;
 * otherwise the id's on-disk entry is created if needed and updated,
 * resetting the grace deadline when the change pushes existing usage
 * over a newly-imposed soft limit.  Returns ENODEV when quotas of that
 * idtype are not enabled.
 */
int
lfsquota2_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
    const struct quotaval *val)
{
	int error;
	struct dquot *dq;
	struct quota2_header *q2h;
	struct quota2_entry q2e, *q2ep;
	struct buf *bp;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	/* make sure we can index by the fs-independent idtype */
	CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
	CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);

	if (ump->um_quotas[key->qk_idtype] == NULLVP)
		return ENODEV;

	if (key->qk_id == QUOTA_DEFAULTID) {
		/* update the default entry kept in the header block */
		mutex_enter(&lfs_dqlock);
		error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
		if (error) {
			mutex_exit(&lfs_dqlock);
			goto out_error;
		}
		lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
		quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
		lfsquota2_ulfs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
		mutex_exit(&lfs_dqlock);
		quota2_bwrite(ump->um_mountp, bp);
		goto out_error;
	}

	error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
	if (error)
		goto out_error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* need to alloc a new on-disk quot */
		mutex_enter(&lfs_dqlock);
		error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
		mutex_exit(&lfs_dqlock);
		if (error)
			goto out_il;
	}
	KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
	error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
	    dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
	if (error)
		goto out_il;

	/* q2e is a host-byte-order working copy of the on-disk entry */
	lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
	/*
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	if (val->qv_softlimit &&
	    q2e.q2e_val[key->qk_objtype].q2v_cur >= val->qv_softlimit &&
	    (q2e.q2e_val[key->qk_objtype].q2v_softlimit == 0 ||
	    q2e.q2e_val[key->qk_objtype].q2v_cur < q2e.q2e_val[key->qk_objtype].q2v_softlimit))
		q2e.q2e_val[key->qk_objtype].q2v_time = time_second + val->qv_grace;
	quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
	lfsquota2_ulfs_rwq2e(&q2e, q2ep, needswap);
	quota2_bwrite(ump->um_mountp, bp);

out_il:
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
out_error:
	return error;
}
651
/* Context passed to dq2clear_callback via quota2_walk_list. */
struct dq2clear_callback {
	uid_t id;			/* id whose entry is being removed */
	struct dquot *dq;		/* in-core dquot for that id */
	struct quota2_header *q2h;	/* header, for the free list */
};
657
/*
 * quota2_walk_list callback: when the entry owned by c->id is found,
 * unlink it from its hash chain (by rewriting the parent pointer *offp),
 * push it onto the header's free list, clear the dquot's on-disk
 * location, and abort the walk.
 */
static int
dq2clear_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
    uint64_t off, void *v)
{
	struct dq2clear_callback *c = v;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	uint64_t myoff;

	if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
		KASSERT(mutex_owned(&c->dq->dq_interlock));
		/* (0,0) marks "no on-disk entry" */
		c->dq->dq2_lblkno = 0;
		c->dq->dq2_blkoff = 0;
		myoff = *offp;
		/* remove from hash list */
		*offp = q2e->q2e_next;
		/* add to free list */
		q2e->q2e_next = c->q2h->q2h_free;
		c->q2h->q2h_free = myoff;
		return Q2WL_ABORT;
	}
	return 0;
}
/*
 * quotactl DEL: reset the limits of (idtype, id, objtype) back to the
 * defaults.  If the entry then carries no information beyond the
 * defaults (no usage, no non-default limits) it is unlinked from the
 * hash chain and returned to the free list.  The default entry itself
 * cannot be deleted (EOPNOTSUPP); ENOENT if the id has no entry.
 */
int
lfsquota2_handle_cmd_del(struct ulfsmount *ump, const struct quotakey *qk)
{
	int idtype;
	id_t id;
	int objtype;
	int error, i, canfree;
	struct dquot *dq;
	struct quota2_header *q2h;
	struct quota2_entry q2e, *q2ep;
	struct buf *hbp, *bp;
	u_long hash_mask;
	struct dq2clear_callback c;

	idtype = qk->qk_idtype;
	id = qk->qk_id;
	objtype = qk->qk_objtype;

	if (ump->um_quotas[idtype] == NULLVP)
		return ENODEV;
	if (id == QUOTA_DEFAULTID)
		return EOPNOTSUPP;

	/* get the default entry before locking the entry's buffer */
	mutex_enter(&lfs_dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error) {
		mutex_exit(&lfs_dqlock);
		return error;
	}
	/* we'll copy to another disk entry, so no need to swap */
	memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
	mutex_exit(&lfs_dqlock);
	brelse(hbp, 0);

	error = lfs_dqget(NULLVP, id, ump, idtype, &dq);
	if (error)
		return error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* already clear, nothing to do */
		error = ENOENT;
		goto out_il;
	}

	error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
	    &bp, &q2ep, B_MODIFY);
	if (error)
		goto out_error;

	/* make sure we can index by the objtype passed in */
	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

	/* clear the requested objtype by copying from the default entry */
	q2ep->q2e_val[objtype].q2v_softlimit =
	    q2e.q2e_val[objtype].q2v_softlimit;
	q2ep->q2e_val[objtype].q2v_hardlimit =
	    q2e.q2e_val[objtype].q2v_hardlimit;
	q2ep->q2e_val[objtype].q2v_grace =
	    q2e.q2e_val[objtype].q2v_grace;
	q2ep->q2e_val[objtype].q2v_time = 0;

	/* if this entry now contains no information, we can free it */
	canfree = 1;
	for (i = 0; i < N_QL; i++) {
		if (q2ep->q2e_val[i].q2v_cur != 0 ||
		    (q2ep->q2e_val[i].q2v_softlimit !=
		      q2e.q2e_val[i].q2v_softlimit) ||
		    (q2ep->q2e_val[i].q2v_hardlimit !=
		      q2e.q2e_val[i].q2v_hardlimit) ||
		    (q2ep->q2e_val[i].q2v_grace !=
		      q2e.q2e_val[i].q2v_grace)) {
			canfree = 0;
			break;
		}
		/* note: do not need to check q2v_time */
	}

	if (canfree == 0) {
		quota2_bwrite(ump->um_mountp, bp);
		goto out_error;
	}
	/* we can free it. release bp so we can walk the list */
	brelse(bp, 0);
	mutex_enter(&lfs_dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error)
		goto out_dqlock;

	/* walk the id's hash chain; the callback unlinks and frees it */
	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	c.dq = dq;
	c.id = id;
	c.q2h = q2h;
	error = quota2_walk_list(ump, hbp, idtype,
	    &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
	    dq2clear_callback);

	bwrite(hbp);

out_dqlock:
	mutex_exit(&lfs_dqlock);
out_error:
out_il:
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
	return error;
}
790
/*
 * Fetch the full quota2 entry for qk into *ret, converted to host byte
 * order.  Returns ENOENT when the id has no on-disk entry.
 */
static int
quota2_fetch_q2e(struct ulfsmount *ump, const struct quotakey *qk,
    struct quota2_entry *ret)
{
	struct dquot *dq;
	int error;
	struct quota2_entry *q2ep;
	struct buf *bp;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);

	error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
	if (error)
		return error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* (0,0) means no on-disk entry for this id */
		mutex_exit(&dq->dq_interlock);
		lfs_dqrele(NULLVP, dq);
		return ENOENT;
	}
	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
	    &bp, &q2ep, 0);
	if (error) {
		mutex_exit(&dq->dq_interlock);
		lfs_dqrele(NULLVP, dq);
		return error;
	}
	/* copy out (and byteswap) before releasing the buffer */
	lfsquota2_ulfs_rwq2e(q2ep, ret, needswap);
	brelse(bp, 0);
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);

	return 0;
}
826
/*
 * Fetch the FS-independent quotaval for qk (one objtype only) into
 * *ret.  Returns ENOENT when the id has no on-disk entry.
 */
static int
quota2_fetch_quotaval(struct ulfsmount *ump, const struct quotakey *qk,
    struct quotaval *ret)
{
	struct dquot *dq;
	int error;
	struct quota2_entry *q2ep, q2e;
	struct buf *bp;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	id_t id2;

	error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
	if (error)
		return error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* (0,0) means no on-disk entry for this id */
		mutex_exit(&dq->dq_interlock);
		lfs_dqrele(NULLVP, dq);
		return ENOENT;
	}
	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
	    &bp, &q2ep, 0);
	if (error) {
		mutex_exit(&dq->dq_interlock);
		lfs_dqrele(NULLVP, dq);
		return error;
	}
	/* snapshot the entry, then release buffer and locks */
	lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
	brelse(bp, 0);
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);

	q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
	KASSERT(id2 == qk->qk_id);
	return 0;
}
865
/*
 * quotactl GET: return the quotaval for (idtype, id, objtype).
 * QUOTA_DEFAULTID reads the default entry from the header; other ids go
 * through quota2_fetch_quotaval.  Returns EINVAL for a bad objtype,
 * ENODEV when quotas of that idtype are not enabled.
 */
int
lfsquota2_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
    struct quotaval *qv)
{
	int error;
	struct quota2_header *q2h;
	struct quota2_entry q2e;
	struct buf *bp;
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	id_t id2;

	/*
	 * Make sure the FS-independent codes match the internal ones,
	 * so we can use the passed-in objtype without having to
	 * convert it explicitly to QL_BLOCK/QL_FILE.
	 */
	CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
	CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
	CTASSERT(N_QL == 2);

	if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
		return EINVAL;
	}

	if (ump->um_quotas[qk->qk_idtype] == NULLVP)
		return ENODEV;
	if (qk->qk_id == QUOTA_DEFAULTID) {
		/* read the default entry out of the header block */
		mutex_enter(&lfs_dqlock);
		error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
		if (error) {
			mutex_exit(&lfs_dqlock);
			return error;
		}
		lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
		mutex_exit(&lfs_dqlock);
		brelse(bp, 0);
		q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
		    qk->qk_objtype, qv);
		(void)id2;
	} else
		error = quota2_fetch_quotaval(ump, qk, qv);

	return error;
}
911
/*
 * Cursor structure we use.
 *
 * This will get stored in userland between calls so we must not assume
 * it isn't arbitrarily corrupted; q2cursor_check() validates it on the
 * way back in.
 */
struct ulfsq2_cursor {
	uint32_t q2c_magic;	/* magic number (Q2C_MAGIC) */
	int q2c_hashsize;	/* size of hash table at last go */

	int q2c_users_done;	/* true if we've returned all user data */
	int q2c_groups_done;	/* true if we've returned all group data */
	int q2c_defaults_done;	/* true if we've returned the default values */
	int q2c_hashpos;	/* slot to start at in hash table */
	int q2c_uidpos;		/* number of ids we've handled */
	int q2c_blocks_done;	/* true if we've returned the blocks value */
};
929
/*
 * State of a single cursorget call, or at least the part of it that
 * needs to be passed around.
 */
struct q2cursor_state {
	/* data return pointers */
	struct quotakey *keys;
	struct quotaval *vals;

	/* key/value counters */
	unsigned maxkeyvals;	/* capacity of keys[]/vals[] */
	unsigned numkeys;	/* number of keys assigned */

	/* ID to key/value conversion state */
	int skipfirst;		/* if true skip first key/value */
	int skiplast;		/* if true skip last key/value */

	/* ID counters */
	unsigned maxids;	/* maximum number of IDs to handle */
	unsigned numids;	/* number of IDs handled */
};
951
/*
 * Additional structure for the getids walk callback.
 */
struct q2cursor_getids {
	struct q2cursor_state *state;	/* shared cursorget state */
	int idtype;		/* idtype being scanned (user/group) */
	unsigned skip;		/* number of ids to skip over */
	unsigned new_skip;	/* number of ids to skip over next time */
	unsigned skipped;	/* number skipped so far */
	int stopped;		/* true if we stopped quota_walk_list early */
};
963
/*
 * Cursor-related functions
 */

/* magic number stamped into every valid cursor */
#define Q2C_MAGIC (0xbeebe111)

/* extract our cursor from the opaque caller-provided cursor space */
#define Q2CURSOR(qkc) ((struct ulfsq2_cursor *)&qkc->u.qkc_space[0])
973
974 /*
975 * Check that a cursor we're handed is something like valid. If
976 * someone munges it and it still passes these checks, they'll get
977 * partial or odd results back but won't break anything.
978 */
979 static int
980 q2cursor_check(struct ulfsq2_cursor *cursor)
981 {
982 if (cursor->q2c_magic != Q2C_MAGIC) {
983 return EINVAL;
984 }
985 if (cursor->q2c_hashsize < 0) {
986 return EINVAL;
987 }
988
989 if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
990 return EINVAL;
991 }
992 if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
993 return EINVAL;
994 }
995 if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
996 return EINVAL;
997 }
998 if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
999 return EINVAL;
1000 }
1001 if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
1002 return EINVAL;
1003 }
1004 return 0;
1005 }
1006
1007 /*
1008 * Set up the q2cursor state.
1009 */
1010 static void
1011 q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
1012 struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
1013 {
1014 state->keys = keys;
1015 state->vals = vals;
1016
1017 state->maxkeyvals = maxkeyvals;
1018 state->numkeys = 0;
1019
1020 /*
1021 * For each ID there are two quotavals to return. If the
1022 * maximum number of entries to return is odd, we might want
1023 * to skip the first quotaval of the first ID, or the last
1024 * quotaval of the last ID, but not both. So the number of IDs
1025 * we want is (up to) half the number of return slots we have,
1026 * rounded up.
1027 */
1028
1029 state->maxids = (state->maxkeyvals + 1) / 2;
1030 state->numids = 0;
1031 if (state->maxkeyvals % 2) {
1032 if (blocks_done) {
1033 state->skipfirst = 1;
1034 state->skiplast = 0;
1035 } else {
1036 state->skipfirst = 0;
1037 state->skiplast = 1;
1038 }
1039 } else {
1040 state->skipfirst = 0;
1041 state->skiplast = 0;
1042 }
1043 }
1044
1045 /*
1046 * Choose which idtype we're going to work on. If doing a full
1047 * iteration, we do users first, then groups, but either might be
1048 * disabled or marked to skip via cursorsetidtype(), so don't make
1049 * silly assumptions.
1050 */
1051 static int
1052 q2cursor_pickidtype(struct ulfsq2_cursor *cursor, int *idtype_ret)
1053 {
1054 if (cursor->q2c_users_done == 0) {
1055 *idtype_ret = QUOTA_IDTYPE_USER;
1056 } else if (cursor->q2c_groups_done == 0) {
1057 *idtype_ret = QUOTA_IDTYPE_GROUP;
1058 } else {
1059 return EAGAIN;
1060 }
1061 return 0;
1062 }
1063
1064 /*
1065 * Add an ID to the current state. Sets up either one or two keys to
1066 * refer to it, depending on whether it's first/last and the setting
1067 * of skipfirst. (skiplast does not need to be explicitly tested)
1068 */
1069 static void
1070 q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
1071 {
1072 KASSERT(state->numids < state->maxids);
1073 KASSERT(state->numkeys < state->maxkeyvals);
1074
1075 if (!state->skipfirst || state->numkeys > 0) {
1076 state->keys[state->numkeys].qk_idtype = idtype;
1077 state->keys[state->numkeys].qk_id = id;
1078 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
1079 state->numkeys++;
1080 }
1081 if (state->numkeys < state->maxkeyvals) {
1082 state->keys[state->numkeys].qk_idtype = idtype;
1083 state->keys[state->numkeys].qk_id = id;
1084 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
1085 state->numkeys++;
1086 } else {
1087 KASSERT(state->skiplast);
1088 }
1089 state->numids++;
1090 }
1091
1092 /*
1093 * Callback function for getting IDs. Update counting and call addid.
1094 */
1095 static int
1096 q2cursor_getids_callback(struct ulfsmount *ump, uint64_t *offp,
1097 struct quota2_entry *q2ep, uint64_t off, void *v)
1098 {
1099 struct q2cursor_getids *gi = v;
1100 id_t id;
1101 struct lfs *fs = ump->um_lfs;
1102 const int needswap = ULFS_MPNEEDSWAP(fs);
1103
1104 if (gi->skipped < gi->skip) {
1105 gi->skipped++;
1106 return 0;
1107 }
1108 id = ulfs_rw32(q2ep->q2e_uid, needswap);
1109 q2cursor_addid(gi->state, gi->idtype, id);
1110 gi->new_skip++;
1111 if (gi->state->numids >= gi->state->maxids) {
1112 /* got enough ids, stop now */
1113 gi->stopped = 1;
1114 return Q2WL_ABORT;
1115 }
1116 return 0;
1117 }
1118
/*
 * Fill in a batch of quotakeys by scanning one or more hash chains
 * of the on-disk quota file for IDTYPE, resuming from the position
 * recorded in CURSOR.  On success the hash table size is returned
 * through hashsize_ret and the header's default entry through
 * default_q2e_ret.  Returns EDEADLK when the on-disk hash table was
 * resized since the cursor was started, telling the caller to
 * restart the iteration.
 */
static int
q2cursor_getkeys(struct ulfsmount *ump, int idtype, struct ulfsq2_cursor *cursor,
    struct q2cursor_state *state,
    int *hashsize_ret, struct quota2_entry *default_q2e_ret)
{
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	struct buf *hbp;
	struct quota2_header *q2h;
	int quota2_hash_size;
	struct q2cursor_getids gi;
	uint64_t offset;
	int error;

	/*
	 * Read the header block.  lfs_dqlock is held across the whole
	 * scan, including the chain walks below.
	 */

	mutex_enter(&lfs_dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error) {
		mutex_exit(&lfs_dqlock);
		return error;
	}

	/* if the table size has changed, make the caller start over */
	quota2_hash_size = ulfs_rw16(q2h->q2h_hash_size, needswap);
	if (cursor->q2c_hashsize == 0) {
		cursor->q2c_hashsize = quota2_hash_size;
	} else if (cursor->q2c_hashsize != quota2_hash_size) {
		error = EDEADLK;
		goto scanfail;
	}

	/* grab the entry with the default values out of the header */
	lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);

	/* If we haven't done the defaults yet, that goes first. */
	if (cursor->q2c_defaults_done == 0) {
		q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
		/* if we read both halves, mark it done */
		if (state->numids < state->maxids || !state->skiplast) {
			cursor->q2c_defaults_done = 1;
		}
	}

	gi.state = state;
	gi.idtype = idtype;

	/* Walk hash chains until the batch is full or the table ends. */
	while (state->numids < state->maxids) {
		if (cursor->q2c_hashpos >= quota2_hash_size) {
			/* nothing more left */
			break;
		}

		/* scan this hash chain */
		gi.skip = cursor->q2c_uidpos;
		gi.new_skip = gi.skip;
		gi.skipped = 0;
		gi.stopped = 0;
		offset = q2h->q2h_entries[cursor->q2c_hashpos];

		error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
		    q2cursor_getids_callback);
		/* Q2WL_ABORT is consumed by quota2_walk_list itself. */
		KASSERT(error != Q2WL_ABORT);
		if (error) {
			break;
		}
		if (gi.stopped) {
			/* callback stopped before reading whole chain */
			cursor->q2c_uidpos = gi.new_skip;
			/* if we didn't get both halves, back up */
			if (state->numids == state->maxids && state->skiplast){
				KASSERT(cursor->q2c_uidpos > 0);
				cursor->q2c_uidpos--;
			}
		} else {
			/* read whole chain */
			/* if we got both halves of the last id, advance */
			if (state->numids < state->maxids || !state->skiplast){
				cursor->q2c_uidpos = 0;
				cursor->q2c_hashpos++;
			}
		}
	}

scanfail:
	/* Release the lock and the header buffer on all paths. */
	mutex_exit(&lfs_dqlock);
	brelse(hbp, 0);
	if (error)
		return error;

	*hashsize_ret = quota2_hash_size;
	return 0;
}
1217
1218 /*
1219 * Fetch the quotavals for the quotakeys.
1220 */
1221 static int
1222 q2cursor_getvals(struct ulfsmount *ump, struct q2cursor_state *state,
1223 const struct quota2_entry *default_q2e)
1224 {
1225 int hasid;
1226 id_t loadedid, id;
1227 unsigned pos;
1228 struct quota2_entry q2e;
1229 int objtype;
1230 int error;
1231
1232 hasid = 0;
1233 loadedid = 0;
1234 for (pos = 0; pos < state->numkeys; pos++) {
1235 id = state->keys[pos].qk_id;
1236 if (!hasid || id != loadedid) {
1237 hasid = 1;
1238 loadedid = id;
1239 if (id == QUOTA_DEFAULTID) {
1240 q2e = *default_q2e;
1241 } else {
1242 error = quota2_fetch_q2e(ump,
1243 &state->keys[pos],
1244 &q2e);
1245 if (error == ENOENT) {
1246 /* something changed - start over */
1247 error = EDEADLK;
1248 }
1249 if (error) {
1250 return error;
1251 }
1252 }
1253 }
1254
1255
1256 objtype = state->keys[pos].qk_objtype;
1257 KASSERT(objtype >= 0 && objtype < N_QL);
1258 q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
1259 }
1260
1261 return 0;
1262 }
1263
1264 /*
1265 * Handle cursorget.
1266 *
1267 * We can't just read keys and values directly, because we can't walk
1268 * the list with qdlock and grab dq_interlock to read the entries at
1269 * the same time. So we're going to do two passes: one to figure out
1270 * which IDs we want and fill in the keys, and then a second to use
1271 * the keys to fetch the values.
1272 */
1273 int
1274 lfsquota2_handle_cmd_cursorget(struct ulfsmount *ump, struct quotakcursor *qkc,
1275 struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
1276 unsigned *ret)
1277 {
1278 int error;
1279 struct ulfsq2_cursor *cursor;
1280 struct ulfsq2_cursor newcursor;
1281 struct q2cursor_state state;
1282 struct quota2_entry default_q2e;
1283 int idtype;
1284 int quota2_hash_size = 0; /* XXXuninit */
1285
1286 /*
1287 * Convert and validate the cursor.
1288 */
1289 cursor = Q2CURSOR(qkc);
1290 error = q2cursor_check(cursor);
1291 if (error) {
1292 return error;
1293 }
1294
1295 /*
1296 * Make sure our on-disk codes match the values of the
1297 * FS-independent ones. This avoids the need for explicit
1298 * conversion (which would be a NOP anyway and thus easily
1299 * left out or called in the wrong places...)
1300 */
1301 CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
1302 CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
1303 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
1304 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
1305
1306 /*
1307 * If some of the idtypes aren't configured/enabled, arrange
1308 * to skip over them.
1309 */
1310 if (cursor->q2c_users_done == 0 &&
1311 ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1312 cursor->q2c_users_done = 1;
1313 }
1314 if (cursor->q2c_groups_done == 0 &&
1315 ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1316 cursor->q2c_groups_done = 1;
1317 }
1318
1319 /* Loop over, potentially, both idtypes */
1320 while (1) {
1321
1322 /* Choose id type */
1323 error = q2cursor_pickidtype(cursor, &idtype);
1324 if (error == EAGAIN) {
1325 /* nothing more to do, return 0 */
1326 *ret = 0;
1327 return 0;
1328 }
1329 KASSERT(ump->um_quotas[idtype] != NULLVP);
1330
1331 /*
1332 * Initialize the per-call iteration state. Copy the
1333 * cursor state so we can update it in place but back
1334 * out on error.
1335 */
1336 q2cursor_initstate(&state, keys, vals, maxreturn,
1337 cursor->q2c_blocks_done);
1338 newcursor = *cursor;
1339
1340 /* Assign keys */
1341 error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
1342 "a2_hash_size, &default_q2e);
1343 if (error) {
1344 return error;
1345 }
1346
1347 /* Now fill in the values. */
1348 error = q2cursor_getvals(ump, &state, &default_q2e);
1349 if (error) {
1350 return error;
1351 }
1352
1353 /*
1354 * Now that we aren't going to fail and lose what we
1355 * did so far, we can update the cursor state.
1356 */
1357
1358 if (newcursor.q2c_hashpos >= quota2_hash_size) {
1359 if (idtype == QUOTA_IDTYPE_USER)
1360 cursor->q2c_users_done = 1;
1361 else
1362 cursor->q2c_groups_done = 1;
1363
1364 /* start over on another id type */
1365 cursor->q2c_hashsize = 0;
1366 cursor->q2c_defaults_done = 0;
1367 cursor->q2c_hashpos = 0;
1368 cursor->q2c_uidpos = 0;
1369 cursor->q2c_blocks_done = 0;
1370 } else {
1371 *cursor = newcursor;
1372 cursor->q2c_blocks_done = state.skiplast;
1373 }
1374
1375 /*
1376 * If we have something to return, return it.
1377 * Otherwise, continue to the other idtype, if any,
1378 * and only return zero at end of iteration.
1379 */
1380 if (state.numkeys > 0) {
1381 break;
1382 }
1383 }
1384
1385 *ret = state.numkeys;
1386 return 0;
1387 }
1388
1389 int
1390 lfsquota2_handle_cmd_cursoropen(struct ulfsmount *ump, struct quotakcursor *qkc)
1391 {
1392 struct ulfsq2_cursor *cursor;
1393
1394 CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
1395 cursor = Q2CURSOR(qkc);
1396
1397 cursor->q2c_magic = Q2C_MAGIC;
1398 cursor->q2c_hashsize = 0;
1399
1400 cursor->q2c_users_done = 0;
1401 cursor->q2c_groups_done = 0;
1402 cursor->q2c_defaults_done = 0;
1403 cursor->q2c_hashpos = 0;
1404 cursor->q2c_uidpos = 0;
1405 cursor->q2c_blocks_done = 0;
1406 return 0;
1407 }
1408
/*
 * Close a quota cursor.  No resources are held behind a quota2
 * cursor, so all there is to do is validate it.
 */
int
lfsquota2_handle_cmd_cursorclose(struct ulfsmount *ump, struct quotakcursor *qkc)
{
	return q2cursor_check(Q2CURSOR(qkc));
}
1425
1426 int
1427 lfsquota2_handle_cmd_cursorskipidtype(struct ulfsmount *ump,
1428 struct quotakcursor *qkc, int idtype)
1429 {
1430 struct ulfsq2_cursor *cursor;
1431 int error;
1432
1433 cursor = Q2CURSOR(qkc);
1434 error = q2cursor_check(cursor);
1435 if (error) {
1436 return error;
1437 }
1438
1439 switch (idtype) {
1440 case QUOTA_IDTYPE_USER:
1441 cursor->q2c_users_done = 1;
1442 break;
1443 case QUOTA_IDTYPE_GROUP:
1444 cursor->q2c_groups_done = 1;
1445 break;
1446 default:
1447 return EINVAL;
1448 }
1449
1450 return 0;
1451 }
1452
1453 int
1454 lfsquota2_handle_cmd_cursoratend(struct ulfsmount *ump, struct quotakcursor *qkc,
1455 int *ret)
1456 {
1457 struct ulfsq2_cursor *cursor;
1458 int error;
1459
1460 cursor = Q2CURSOR(qkc);
1461 error = q2cursor_check(cursor);
1462 if (error) {
1463 return error;
1464 }
1465
1466 *ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
1467 return 0;
1468 }
1469
1470 int
1471 lfsquota2_handle_cmd_cursorrewind(struct ulfsmount *ump, struct quotakcursor *qkc)
1472 {
1473 struct ulfsq2_cursor *cursor;
1474 int error;
1475
1476 cursor = Q2CURSOR(qkc);
1477 error = q2cursor_check(cursor);
1478 if (error) {
1479 return error;
1480 }
1481
1482 cursor->q2c_hashsize = 0;
1483
1484 cursor->q2c_users_done = 0;
1485 cursor->q2c_groups_done = 0;
1486 cursor->q2c_defaults_done = 0;
1487 cursor->q2c_hashpos = 0;
1488 cursor->q2c_uidpos = 0;
1489 cursor->q2c_blocks_done = 0;
1490
1491 return 0;
1492 }
1493
/*
 * Sync quota state for this mount.  Deliberately a no-op for lfs
 * quota2: this entry point performs no work itself.
 * NOTE(review): presumably quota file updates are written out
 * through the normal buffer/segment writeback paths - confirm.
 */
int
lfs_q2sync(struct mount *mp)
{
	return 0;
}
1499
/*
 * Context passed to dq2get_callback while walking a hash chain:
 * the ID being looked up and the dquot to fill in with the on-disk
 * location of its entry.
 */
struct dq2get_callback {
	uid_t id;		/* ID we are searching for */
	struct dquot *dq;	/* dquot to record the entry location in */
};
1504
1505 static int
1506 dq2get_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
1507 uint64_t off, void *v)
1508 {
1509 struct dq2get_callback *c = v;
1510 daddr_t lblkno;
1511 int blkoff;
1512 struct lfs *fs = ump->um_lfs;
1513 const int needswap = ULFS_MPNEEDSWAP(fs);
1514
1515 if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
1516 KASSERT(mutex_owned(&c->dq->dq_interlock));
1517 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
1518 blkoff = (off & ump->umq2_bmask);
1519 c->dq->dq2_lblkno = lblkno;
1520 c->dq->dq2_blkoff = blkoff;
1521 return Q2WL_ABORT;
1522 }
1523 return 0;
1524 }
1525
1526 int
1527 lfs_dq2get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
1528 struct dquot *dq)
1529 {
1530 struct buf *bp;
1531 struct quota2_header *q2h;
1532 int error;
1533 daddr_t offset;
1534 u_long hash_mask;
1535 struct dq2get_callback c = {
1536 .id = id,
1537 .dq = dq
1538 };
1539
1540 KASSERT(mutex_owned(&dq->dq_interlock));
1541 mutex_enter(&lfs_dqlock);
1542 error = getq2h(ump, type, &bp, &q2h, 0);
1543 if (error)
1544 goto out_mutex;
1545 /* look for our entry */
1546 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
1547 offset = q2h->q2h_entries[id & hash_mask];
1548 error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
1549 dq2get_callback);
1550 brelse(bp, 0);
1551 out_mutex:
1552 mutex_exit(&lfs_dqlock);
1553 return error;
1554 }
1555
/*
 * Write a dquot back to the quota file.  Deliberately a no-op for
 * lfs quota2: nothing is done in this entry point.
 * NOTE(review): presumably entries are updated in place via the
 * buffer cache elsewhere - confirm against the quota2 write paths.
 */
int
lfs_dq2sync(struct vnode *vp, struct dquot *dq)
{
	return 0;
}
1561
1562 int
1563 lfs_quota2_mount(struct mount *mp)
1564 {
1565 struct ulfsmount *ump = VFSTOULFS(mp);
1566 struct lfs *fs = ump->um_lfs;
1567 int error;
1568 struct vnode *vp;
1569 struct lwp *l = curlwp;
1570
1571 if ((fs->lfs_use_quota2) == 0)
1572 return 0;
1573
1574 fs->um_flags |= ULFS_QUOTA2;
1575 ump->umq2_bsize = lfs_sb_getbsize(fs);
1576 ump->umq2_bmask = lfs_sb_getbmask(fs);
1577 if (fs->lfs_quota_magic != Q2_HEAD_MAGIC) {
1578 printf("%s: Invalid quota magic number\n",
1579 mp->mnt_stat.f_mntonname);
1580 return EINVAL;
1581 }
1582
1583 error = 0;
1584 if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA)) &&
1585 fs->lfs_quotaino[ULFS_USRQUOTA] == 0) {
1586 printf("%s: No user quota inode\n",
1587 mp->mnt_stat.f_mntonname);
1588 error = EINVAL;
1589 }
1590 if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA)) &&
1591 fs->lfs_quotaino[ULFS_GRPQUOTA] == 0) {
1592 printf("%s: No group quota inode\n",
1593 mp->mnt_stat.f_mntonname);
1594 error = EINVAL;
1595 }
1596 if (error)
1597 return error;
1598
1599 if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA) &&
1600 ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1601 error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_USRQUOTA],
1602 LK_EXCLUSIVE, &vp);
1603 if (error) {
1604 printf("%s: can't vget() user quota inode: %d\n",
1605 mp->mnt_stat.f_mntonname, error);
1606 return error;
1607 }
1608 ump->um_quotas[ULFS_USRQUOTA] = vp;
1609 ump->um_cred[ULFS_USRQUOTA] = l->l_cred;
1610 mutex_enter(vp->v_interlock);
1611 vp->v_writecount++;
1612 mutex_exit(vp->v_interlock);
1613 VOP_UNLOCK(vp);
1614 }
1615 if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA) &&
1616 ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1617 error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_GRPQUOTA],
1618 LK_EXCLUSIVE, &vp);
1619 if (error) {
1620 vn_close(ump->um_quotas[ULFS_USRQUOTA],
1621 FREAD|FWRITE, l->l_cred);
1622 printf("%s: can't vget() group quota inode: %d\n",
1623 mp->mnt_stat.f_mntonname, error);
1624 return error;
1625 }
1626 ump->um_quotas[ULFS_GRPQUOTA] = vp;
1627 ump->um_cred[ULFS_GRPQUOTA] = l->l_cred;
1628 mutex_enter(vp->v_interlock);
1629 vp->v_vflag |= VV_SYSTEM;
1630 vp->v_writecount++;
1631 mutex_exit(vp->v_interlock);
1632 VOP_UNLOCK(vp);
1633 }
1634
1635 mp->mnt_flag |= MNT_QUOTA;
1636 return 0;
1637 }
1638