1 /* $NetBSD: ulfs_quota2.c,v 1.10 2013/07/28 01:22:55 dholland Exp $ */
2 /* from NetBSD: ufs_quota2.c,v 1.35 2012/09/27 07:47:56 bouyer Exp */
3 /* from NetBSD: ffs_quota2.c,v 1.4 2011/06/12 03:36:00 rmind Exp */
4
5 /*-
6 * Copyright (c) 2010 Manuel Bouyer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.10 2013/07/28 01:22:55 dholland Exp $");
33
34 #include <sys/buf.h>
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/namei.h>
39 #include <sys/file.h>
40 #include <sys/proc.h>
41 #include <sys/vnode.h>
42 #include <sys/mount.h>
43 #include <sys/fstrans.h>
44 #include <sys/kauth.h>
45 #include <sys/wapbl.h>
46 #include <sys/quota.h>
47 #include <sys/quotactl.h>
48
49 #include <ufs/lfs/lfs_extern.h>
50
51 #include <ufs/lfs/ulfs_quota2.h>
52 #include <ufs/lfs/ulfs_inode.h>
53 #include <ufs/lfs/ulfsmount.h>
54 #include <ufs/lfs/ulfs_bswap.h>
55 #include <ufs/lfs/ulfs_extern.h>
56 #include <ufs/lfs/ulfs_quota.h>
57
58 /*
59 * LOCKING:
60 * Data in the entries are protected by the associated struct dquot's
61 * dq_interlock (this means we can't read or change a quota entry without
62 * grabbing a dquot for it).
63 * The header and lists (including pointers in the data entries, and q2e_uid)
64 * are protected by the global dqlock.
65 * The locking order is dq_interlock -> dqlock.
66 */
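
/*
 * A minimal sketch of that ordering (illustration only, mirroring what
 * getinoquota2() and quota2_q2ealloc() below actually do): take the
 * entry's dq_interlock first, then lfs_dqlock when the header or the
 * list links must be touched.
 *
 *	mutex_enter(&dq->dq_interlock);		entry data
 *	mutex_enter(&lfs_dqlock);		header, free/hash lists
 *	... update header / lists / entry ...
 *	mutex_exit(&lfs_dqlock);
 *	mutex_exit(&dq->dq_interlock);
 */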
67
68 static int quota2_bwrite(struct mount *, struct buf *);
69 static int getinoquota2(struct inode *, bool, bool, struct buf **,
70 struct quota2_entry **);
71 static int getq2h(struct ulfsmount *, int, struct buf **,
72 struct quota2_header **, int);
73 static int getq2e(struct ulfsmount *, int, daddr_t, int, struct buf **,
74 struct quota2_entry **, int);
75 static int quota2_walk_list(struct ulfsmount *, struct buf *, int,
76 uint64_t *, int, void *,
77 int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *,
78 uint64_t, void *));
79
80 static const char *limnames[] = INITQLNAMES;
81
82 static void
83 quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
84 struct quota2_entry *q2e)
85 {
86 /* make sure we can index q2e_val[] by the fs-independent objtype */
87 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
88 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
89
90 q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
91 q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
92 q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
93 }
94
95 /*
96 * Convert internal representation to FS-independent representation.
97 * (Note that while the two types are currently identical, the
98 * internal representation is an on-disk struct and the FS-independent
99 * representation is not, and they might diverge in the future.)
100 */
101 static void
102 q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
103 {
104 qv->qv_softlimit = q2v->q2v_softlimit;
105 qv->qv_hardlimit = q2v->q2v_hardlimit;
106 qv->qv_usage = q2v->q2v_cur;
107 qv->qv_expiretime = q2v->q2v_time;
108 qv->qv_grace = q2v->q2v_grace;
109 }
110
111 /*
112 * Convert a quota2entry and default-flag to the FS-independent
113 * representation.
114 */
115 static void
116 q2e_to_quotaval(struct quota2_entry *q2e, int def,
117 id_t *id, int objtype, struct quotaval *ret)
118 {
119 if (def) {
120 *id = QUOTA_DEFAULTID;
121 } else {
122 *id = q2e->q2e_uid;
123 }
124
125 KASSERT(objtype >= 0 && objtype < N_QL);
126 q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
127 }
128
129
130 static int
131 quota2_bwrite(struct mount *mp, struct buf *bp)
132 {
133 if (mp->mnt_flag & MNT_SYNCHRONOUS)
134 return bwrite(bp);
135 else {
136 bdwrite(bp);
137 return 0;
138 }
139 }
140
141 static int
142 getq2h(struct ulfsmount *ump, int type,
143 struct buf **bpp, struct quota2_header **q2hp, int flags)
144 {
145 #ifdef LFS_EI
146 struct lfs *fs = ump->um_lfs;
147 const int needswap = ULFS_MPNEEDSWAP(fs);
148 #endif
149 int error;
150 struct buf *bp;
151 struct quota2_header *q2h;
152
153 KASSERT(mutex_owned(&lfs_dqlock));
154 error = bread(ump->um_quotas[type], 0, ump->umq2_bsize,
155 ump->um_cred[type], flags, &bp);
156 if (error)
157 return error;
158 if (bp->b_resid != 0)
159 panic("dq2get: %s quota file truncated", lfs_quotatypes[type]);
160
161 q2h = (void *)bp->b_data;
162 if (ulfs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
163 q2h->q2h_type != type)
164 panic("dq2get: corrupted %s quota header", lfs_quotatypes[type]);
165 *bpp = bp;
166 *q2hp = q2h;
167 return 0;
168 }
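
/*
 * Layout note (a summary of the code that follows, not new behavior):
 * the header in logical block 0 holds the free-list head (q2h_free) and
 * the hash buckets (q2h_entries[]).  These, like q2e_next in each entry,
 * hold byte offsets into the quota file, with 0 terminating a chain;
 * getq2e() and quota2_walk_list() split such an offset into a logical
 * block number and an offset within that block.
 */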
169
170 static int
171 getq2e(struct ulfsmount *ump, int type, daddr_t lblkno, int blkoffset,
172 struct buf **bpp, struct quota2_entry **q2ep, int flags)
173 {
174 int error;
175 struct buf *bp;
176
177 if (blkoffset & (sizeof(uint64_t) - 1)) {
178 panic("dq2get: %s quota file corrupted",
179 lfs_quotatypes[type]);
180 }
181 error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize,
182 ump->um_cred[type], flags, &bp);
183 if (error)
184 return error;
185 if (bp->b_resid != 0) {
186 panic("dq2get: %s quota file corrupted",
187 lfs_quotatypes[type]);
188 }
189 *q2ep = (void *)((char *)bp->b_data + blkoffset);
190 *bpp = bp;
191 return 0;
192 }
193
194 /* walk a quota entry list, calling the callback for each entry */
195 #define Q2WL_ABORT 0x10000000
196
197 static int
198 quota2_walk_list(struct ulfsmount *ump, struct buf *hbp, int type,
199 uint64_t *offp, int flags, void *a,
200 int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
201 {
202 #ifdef LFS_EI
203 struct lfs *fs = ump->um_lfs;
204 const int needswap = ULFS_MPNEEDSWAP(fs);
205 #endif
206 daddr_t off = ulfs_rw64(*offp, needswap);
207 struct buf *bp, *obp = hbp;
208 int ret = 0, ret2 = 0;
209 struct quota2_entry *q2e;
210 daddr_t lblkno, blkoff, olblkno = 0;
211
212 KASSERT(mutex_owned(&lfs_dqlock));
213
214 while (off != 0) {
215 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
216 blkoff = (off & ump->umq2_bmask);
217 if (lblkno == 0) {
218 /* in the header block */
219 bp = hbp;
220 } else if (lblkno == olblkno) {
221 /* still in the same buf */
222 bp = obp;
223 } else {
224 ret = bread(ump->um_quotas[type], lblkno,
225 ump->umq2_bsize,
226 ump->um_cred[type], flags, &bp);
227 if (ret)
228 return ret;
229 if (bp->b_resid != 0) {
230 panic("quota2_walk_list: %s quota file corrupted",
231 lfs_quotatypes[type]);
232 }
233 }
234 q2e = (void *)((char *)(bp->b_data) + blkoff);
235 ret = (*func)(ump, offp, q2e, off, a);
236 if (off != ulfs_rw64(*offp, needswap)) {
237 /* callback changed parent's pointer, redo */
238 off = ulfs_rw64(*offp, needswap);
239 if (bp != hbp && bp != obp)
240 ret2 = bwrite(bp);
241 } else {
242 /* parent is now current */
243 if (obp != bp && obp != hbp) {
244 if (flags & B_MODIFY)
245 ret2 = bwrite(obp);
246 else
247 brelse(obp, 0);
248 }
249 obp = bp;
250 olblkno = lblkno;
251 offp = &(q2e->q2e_next);
252 off = ulfs_rw64(*offp, needswap);
253 }
254 if (ret)
255 break;
256 if (ret2) {
257 ret = ret2;
258 break;
259 }
260 }
261 if (obp != hbp) {
262 if (flags & B_MODIFY)
263 ret2 = bwrite(obp);
264 else
265 brelse(obp, 0);
266 }
267 if (ret & Q2WL_ABORT)
268 return 0;
269 if (ret == 0)
270 return ret2;
271 return ret;
272 }
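
/*
 * Shape of a walk callback, as a sketch (dq2get_callback() and
 * dq2clear_callback() below are the real users; the names here are
 * illustrative only).  Returning 0 continues the walk, Q2WL_ABORT stops
 * it with quota2_walk_list() reporting success, and any other value
 * stops it with that error.  Writing through *offp (the on-disk pointer
 * that led to this entry) relinks the entry; the walker notices the
 * change and re-reads from the new target.
 */
#if 0
static int
example_count_callback(struct ulfsmount *ump, uint64_t *offp,
    struct quota2_entry *q2e, uint64_t off, void *v)
{
	unsigned *countp = v;

	(*countp)++;	/* just count the entries in this chain */
	return 0;	/* keep walking */
}
#endif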
273
274 int
275 lfsquota2_umount(struct mount *mp, int flags)
276 {
277 int i, error;
278 struct ulfsmount *ump = VFSTOULFS(mp);
279 struct lfs *fs = ump->um_lfs;
280
281 if ((fs->um_flags & ULFS_QUOTA2) == 0)
282 return 0;
283
284 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
285 if (ump->um_quotas[i] != NULLVP) {
286 error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
287 ump->um_cred[i]);
288 if (error) {
289 printf("quota2_umount failed: close(%p) %d\n",
290 ump->um_quotas[i], error);
291 return error;
292 }
293 }
294 ump->um_quotas[i] = NULLVP;
295 }
296 return 0;
297 }
298
299 static int
300 quota2_q2ealloc(struct ulfsmount *ump, int type, uid_t uid, struct dquot *dq)
301 {
302 int error, error2;
303 struct buf *hbp, *bp;
304 struct quota2_header *q2h;
305 struct quota2_entry *q2e;
306 daddr_t offset;
307 u_long hash_mask;
308 struct lfs *fs = ump->um_lfs;
309 const int needswap = ULFS_MPNEEDSWAP(fs);
310
311 KASSERT(mutex_owned(&dq->dq_interlock));
312 KASSERT(mutex_owned(&lfs_dqlock));
313 error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
314 if (error)
315 return error;
316 offset = ulfs_rw64(q2h->q2h_free, needswap);
317 if (offset == 0) {
318 struct vnode *vp = ump->um_quotas[type];
319 struct inode *ip = VTOI(vp);
320 uint64_t size = ip->i_size;
321 /* need to allocate a new disk block */
322 error = lfs_balloc(vp, size, ump->umq2_bsize,
323 ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
324 if (error) {
325 brelse(hbp, 0);
326 return error;
327 }
328 KASSERT((ip->i_size % ump->umq2_bsize) == 0);
329 ip->i_size += ump->umq2_bsize;
330 DIP_ASSIGN(ip, size, ip->i_size);
331 ip->i_flag |= IN_CHANGE | IN_UPDATE;
332 uvm_vnp_setsize(vp, ip->i_size);
333 lfsquota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
334 needswap);
335 error = bwrite(bp);
336 error2 = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
337 if (error || error2) {
338 brelse(hbp, 0);
339 if (error)
340 return error;
341 return error2;
342 }
343 offset = ulfs_rw64(q2h->q2h_free, needswap);
344 KASSERT(offset != 0);
345 }
346 dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
347 dq->dq2_blkoff = (offset & ump->umq2_bmask);
348 if (dq->dq2_lblkno == 0) {
349 bp = hbp;
350 q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
351 } else {
352 error = getq2e(ump, type, dq->dq2_lblkno,
353 dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
354 if (error) {
355 brelse(hbp, 0);
356 return error;
357 }
358 }
359 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
360 /* remove from free list */
361 q2h->q2h_free = q2e->q2e_next;
362
363 memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
364 q2e->q2e_uid = ulfs_rw32(uid, needswap);
365 /* insert in hash list */
366 q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
367 q2h->q2h_entries[uid & hash_mask] = ulfs_rw64(offset, needswap);
368 if (hbp != bp) {
369 bwrite(hbp);
370 }
371 bwrite(bp);
372 return 0;
373 }
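
/*
 * Worked example of the offset split above (block size assumed for
 * illustration): with an 8 KiB quota-file block, mnt_fs_bshift is 13 and
 * umq2_bmask is 0x1fff, so an entry at byte offset 0x2040 has
 * dq2_lblkno = 0x2040 >> 13 = 1 and dq2_blkoff = 0x2040 & 0x1fff = 0x40;
 * offsets below 0x2000 fall in the header block (lblkno 0).
 */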
374
375 static int
376 getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
377 struct quota2_entry **q2ep)
378 {
379 int error;
380 int i;
381 struct dquot *dq;
382 struct ulfsmount *ump = ip->i_ump;
383 u_int32_t ino_ids[ULFS_MAXQUOTAS];
384
385 error = lfs_getinoquota(ip);
386 if (error)
387 return error;
388
389 ino_ids[ULFS_USRQUOTA] = ip->i_uid;
390 ino_ids[ULFS_GRPQUOTA] = ip->i_gid;
391 /* first get the interlock for all dquots */
392 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
393 dq = ip->i_dquot[i];
394 if (dq == NODQUOT)
395 continue;
396 mutex_enter(&dq->dq_interlock);
397 }
398 /* now get the corresponding quota entry */
399 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
400 bpp[i] = NULL;
401 q2ep[i] = NULL;
402 dq = ip->i_dquot[i];
403 if (dq == NODQUOT)
404 continue;
405 if (__predict_false(ump->um_quotas[i] == NULL)) {
406 /*
407 * quotas have been turned off. This can happen
408 * at umount time.
409 */
410 mutex_exit(&dq->dq_interlock);
411 lfs_dqrele(NULLVP, dq);
412 ip->i_dquot[i] = NULL;
413 continue;
414 }
415
416 if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
417 if (!alloc) {
418 continue;
419 }
420 /* need to allocate a new on-disk quota entry */
421 mutex_enter(&lfs_dqlock);
422 error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
423 mutex_exit(&lfs_dqlock);
424 if (error)
425 return error;
426 }
427 KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
428 error = getq2e(ump, i, dq->dq2_lblkno,
429 dq->dq2_blkoff, &bpp[i], &q2ep[i],
430 modify ? B_MODIFY : 0);
431 if (error)
432 return error;
433 }
434 return 0;
435 }
436
437 __inline static int __unused
438 lfsquota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
439 {
440 return lfsquota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
441 q2v->q2v_hardlimit, q2v->q2v_time, now);
442 }
443
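/*
 * Check and apply a usage change of 'change' units of 'vtype'
 * (QL_BLOCK or QL_FILE) against the inode's quotas: change == 0 just
 * releases the entries, change < 0 decrements usage (clamped at zero),
 * and change > 0 is charged only if the limits allow it (or FORCE is
 * set, or the caller is privileged to exceed limits).
 */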
444 static int
445 quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
446 int flags)
447 {
448 int error;
449 struct buf *bp[ULFS_MAXQUOTAS];
450 struct quota2_entry *q2e[ULFS_MAXQUOTAS];
451 struct quota2_val *q2vp;
452 struct dquot *dq;
453 uint64_t ncurblks;
454 struct ulfsmount *ump = ip->i_ump;
455 struct lfs *fs = ip->i_lfs;
456 struct mount *mp = ump->um_mountp;
457 const int needswap = ULFS_MPNEEDSWAP(fs);
458 int i;
459
460 if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
461 return error;
462 if (change == 0) {
463 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
464 dq = ip->i_dquot[i];
465 if (dq == NODQUOT)
466 continue;
467 if (bp[i])
468 brelse(bp[i], 0);
469 mutex_exit(&dq->dq_interlock);
470 }
471 return 0;
472 }
473 if (change < 0) {
474 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
475 dq = ip->i_dquot[i];
476 if (dq == NODQUOT)
477 continue;
478 if (q2e[i] == NULL) {
479 mutex_exit(&dq->dq_interlock);
480 continue;
481 }
482 q2vp = &q2e[i]->q2e_val[vtype];
483 ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
484 if (ncurblks < -change)
485 ncurblks = 0;
486 else
487 ncurblks += change;
488 q2vp->q2v_cur = ulfs_rw64(ncurblks, needswap);
489 quota2_bwrite(mp, bp[i]);
490 mutex_exit(&dq->dq_interlock);
491 }
492 return 0;
493 }
494 /* see if the allocation is allowed */
495 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
496 struct quota2_val q2v;
497 int ql_stat;
498 dq = ip->i_dquot[i];
499 if (dq == NODQUOT)
500 continue;
501 KASSERT(q2e[i] != NULL);
502 lfsquota2_ulfs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
503 ql_stat = lfsquota2_check_limit(&q2v, change, time_second);
504
505 if ((flags & FORCE) == 0 &&
506 kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
507 KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
508 KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
509 /* enforce this limit */
510 switch(QL_STATUS(ql_stat)) {
511 case QL_S_DENY_HARD:
512 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
513 uprintf("\n%s: write failed, %s %s "
514 "limit reached\n",
515 mp->mnt_stat.f_mntonname,
516 lfs_quotatypes[i], limnames[vtype]);
517 dq->dq_flags |= DQ_WARN(vtype);
518 }
519 error = EDQUOT;
520 break;
521 case QL_S_DENY_GRACE:
522 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
523 uprintf("\n%s: write failed, %s %s "
524 "limit reached\n",
525 mp->mnt_stat.f_mntonname,
526 lfs_quotatypes[i], limnames[vtype]);
527 dq->dq_flags |= DQ_WARN(vtype);
528 }
529 error = EDQUOT;
530 break;
531 case QL_S_ALLOW_SOFT:
532 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
533 uprintf("\n%s: warning, %s %s "
534 "quota exceeded\n",
535 mp->mnt_stat.f_mntonname,
536 lfs_quotatypes[i], limnames[vtype]);
537 dq->dq_flags |= DQ_WARN(vtype);
538 }
539 break;
540 }
541 }
542 /*
543 * always do this; we don't know if the allocation will
544 * succeed or not in the end. If we don't do the allocation,
545 * q2v_time will be ignored anyway.
546 */
547 if (ql_stat & QL_F_CROSS) {
548 q2v.q2v_time = time_second + q2v.q2v_grace;
549 lfsquota2_ulfs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
550 needswap);
551 }
552 }
553
554 /* now do the allocation if allowed */
555 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
556 dq = ip->i_dquot[i];
557 if (dq == NODQUOT)
558 continue;
559 KASSERT(q2e[i] != NULL);
560 if (error == 0) {
561 q2vp = &q2e[i]->q2e_val[vtype];
562 ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
563 q2vp->q2v_cur = ulfs_rw64(ncurblks + change, needswap);
564 quota2_bwrite(mp, bp[i]);
565 } else
566 brelse(bp[i], 0);
567 mutex_exit(&dq->dq_interlock);
568 }
569 return error;
570 }
571
572 int
573 lfs_chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
574 {
575 return quota2_check(ip, QL_BLOCK, change, cred, flags);
576 }
577
578 int
579 lfs_chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
580 {
581 return quota2_check(ip, QL_FILE, change, cred, flags);
582 }
583
584 int
585 lfsquota2_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
586 const struct quotaval *val)
587 {
588 int error;
589 struct dquot *dq;
590 struct quota2_header *q2h;
591 struct quota2_entry q2e, *q2ep;
592 struct buf *bp;
593 struct lfs *fs = ump->um_lfs;
594 const int needswap = ULFS_MPNEEDSWAP(fs);
595
596 /* make sure we can index by the fs-independent idtype */
597 CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
598 CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
599
600 if (ump->um_quotas[key->qk_idtype] == NULLVP)
601 return ENODEV;
602
603 if (key->qk_id == QUOTA_DEFAULTID) {
604 mutex_enter(&lfs_dqlock);
605 error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
606 if (error) {
607 mutex_exit(&lfs_dqlock);
608 goto out_wapbl;
609 }
610 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
611 quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
612 lfsquota2_ulfs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
613 mutex_exit(&lfs_dqlock);
614 quota2_bwrite(ump->um_mountp, bp);
615 goto out_wapbl;
616 }
617
618 error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
619 if (error)
620 goto out_wapbl;
621
622 mutex_enter(&dq->dq_interlock);
623 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
624 /* need to allocate a new on-disk quota entry */
625 mutex_enter(&lfs_dqlock);
626 error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
627 mutex_exit(&lfs_dqlock);
628 if (error)
629 goto out_il;
630 }
631 KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
632 error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
633 dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
634 if (error)
635 goto out_il;
636
637 lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
638 quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
639 lfsquota2_ulfs_rwq2e(&q2e, q2ep, needswap);
640 quota2_bwrite(ump->um_mountp, bp);
641
642 out_il:
643 mutex_exit(&dq->dq_interlock);
644 lfs_dqrele(NULLVP, dq);
645 out_wapbl:
646 return error;
647 }
648
649 struct dq2clear_callback {
650 uid_t id;
651 struct dquot *dq;
652 struct quota2_header *q2h;
653 };
654
655 static int
656 dq2clear_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
657 uint64_t off, void *v)
658 {
659 struct dq2clear_callback *c = v;
660 #ifdef LFS_EI
661 struct lfs *fs = ump->um_lfs;
662 const int needswap = ULFS_MPNEEDSWAP(fs);
663 #endif
664 uint64_t myoff;
665
666 if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
667 KASSERT(mutex_owned(&c->dq->dq_interlock));
668 c->dq->dq2_lblkno = 0;
669 c->dq->dq2_blkoff = 0;
670 myoff = *offp;
671 /* remove from hash list */
672 *offp = q2e->q2e_next;
673 /* add to free list */
674 q2e->q2e_next = c->q2h->q2h_free;
675 c->q2h->q2h_free = myoff;
676 return Q2WL_ABORT;
677 }
678 return 0;
679 }
680 int
681 lfsquota2_handle_cmd_delete(struct ulfsmount *ump, const struct quotakey *qk)
682 {
683 int idtype;
684 id_t id;
685 int objtype;
686 int error, i, canfree;
687 struct dquot *dq;
688 struct quota2_header *q2h;
689 struct quota2_entry q2e, *q2ep;
690 struct buf *hbp, *bp;
691 u_long hash_mask;
692 struct dq2clear_callback c;
693
694 idtype = qk->qk_idtype;
695 id = qk->qk_id;
696 objtype = qk->qk_objtype;
697
698 if (ump->um_quotas[idtype] == NULLVP)
699 return ENODEV;
700 if (id == QUOTA_DEFAULTID)
701 return EOPNOTSUPP;
702
703 /* get the default entry before locking the entry's buffer */
704 mutex_enter(&lfs_dqlock);
705 error = getq2h(ump, idtype, &hbp, &q2h, 0);
706 if (error) {
707 mutex_exit(&lfs_dqlock);
708 return error;
709 }
710 /* we'll copy to another disk entry, so no need to swap */
711 memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
712 mutex_exit(&lfs_dqlock);
713 brelse(hbp, 0);
714
715 error = lfs_dqget(NULLVP, id, ump, idtype, &dq);
716 if (error)
717 return error;
718
719 mutex_enter(&dq->dq_interlock);
720 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
721 /* already clear, nothing to do */
722 error = ENOENT;
723 goto out_il;
724 }
725
726 error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
727 &bp, &q2ep, B_MODIFY);
728 if (error)
729 goto out_wapbl;
730
731 /* make sure we can index by the objtype passed in */
732 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
733 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
734
735 /* clear the requested objtype by copying from the default entry */
736 q2ep->q2e_val[objtype].q2v_softlimit =
737 q2e.q2e_val[objtype].q2v_softlimit;
738 q2ep->q2e_val[objtype].q2v_hardlimit =
739 q2e.q2e_val[objtype].q2v_hardlimit;
740 q2ep->q2e_val[objtype].q2v_grace =
741 q2e.q2e_val[objtype].q2v_grace;
742 q2ep->q2e_val[objtype].q2v_time = 0;
743
744 /* if this entry now contains no information, we can free it */
745 canfree = 1;
746 for (i = 0; i < N_QL; i++) {
747 if (q2ep->q2e_val[i].q2v_cur != 0 ||
748 (q2ep->q2e_val[i].q2v_softlimit !=
749 q2e.q2e_val[i].q2v_softlimit) ||
750 (q2ep->q2e_val[i].q2v_hardlimit !=
751 q2e.q2e_val[i].q2v_hardlimit) ||
752 (q2ep->q2e_val[i].q2v_grace !=
753 q2e.q2e_val[i].q2v_grace)) {
754 canfree = 0;
755 break;
756 }
757 /* note: do not need to check q2v_time */
758 }
759
760 if (canfree == 0) {
761 quota2_bwrite(ump->um_mountp, bp);
762 goto out_wapbl;
763 }
764 /* we can free it. release bp so we can walk the list */
765 brelse(bp, 0);
766 mutex_enter(&lfs_dqlock);
767 error = getq2h(ump, idtype, &hbp, &q2h, 0);
768 if (error)
769 goto out_dqlock;
770
771 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
772 c.dq = dq;
773 c.id = id;
774 c.q2h = q2h;
775 error = quota2_walk_list(ump, hbp, idtype,
776 &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
777 dq2clear_callback);
778
779 bwrite(hbp);
780
781 out_dqlock:
782 mutex_exit(&lfs_dqlock);
783 out_wapbl:
784 out_il:
785 mutex_exit(&dq->dq_interlock);
786 lfs_dqrele(NULLVP, dq);
787 return error;
788 }
789
790 static int
791 quota2_fetch_q2e(struct ulfsmount *ump, const struct quotakey *qk,
792 struct quota2_entry *ret)
793 {
794 struct dquot *dq;
795 int error;
796 struct quota2_entry *q2ep;
797 struct buf *bp;
798 struct lfs *fs = ump->um_lfs;
799 const int needswap = ULFS_MPNEEDSWAP(fs);
800
801 error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
802 if (error)
803 return error;
804
805 mutex_enter(&dq->dq_interlock);
806 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
807 mutex_exit(&dq->dq_interlock);
808 lfs_dqrele(NULLVP, dq);
809 return ENOENT;
810 }
811 error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
812 &bp, &q2ep, 0);
813 if (error) {
814 mutex_exit(&dq->dq_interlock);
815 lfs_dqrele(NULLVP, dq);
816 return error;
817 }
818 lfsquota2_ulfs_rwq2e(q2ep, ret, needswap);
819 brelse(bp, 0);
820 mutex_exit(&dq->dq_interlock);
821 lfs_dqrele(NULLVP, dq);
822
823 return 0;
824 }
825
826 static int
827 quota2_fetch_quotaval(struct ulfsmount *ump, const struct quotakey *qk,
828 struct quotaval *ret)
829 {
830 struct dquot *dq;
831 int error;
832 struct quota2_entry *q2ep, q2e;
833 struct buf *bp;
834 struct lfs *fs = ump->um_lfs;
835 const int needswap = ULFS_MPNEEDSWAP(fs);
836 id_t id2;
837
838 error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
839 if (error)
840 return error;
841
842 mutex_enter(&dq->dq_interlock);
843 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
844 mutex_exit(&dq->dq_interlock);
845 lfs_dqrele(NULLVP, dq);
846 return ENOENT;
847 }
848 error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
849 &bp, &q2ep, 0);
850 if (error) {
851 mutex_exit(&dq->dq_interlock);
852 lfs_dqrele(NULLVP, dq);
853 return error;
854 }
855 lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
856 brelse(bp, 0);
857 mutex_exit(&dq->dq_interlock);
858 lfs_dqrele(NULLVP, dq);
859
860 q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
861 KASSERT(id2 == qk->qk_id);
862 return 0;
863 }
864
865 int
866 lfsquota2_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
867 struct quotaval *qv)
868 {
869 int error;
870 struct quota2_header *q2h;
871 struct quota2_entry q2e;
872 struct buf *bp;
873 struct lfs *fs = ump->um_lfs;
874 const int needswap = ULFS_MPNEEDSWAP(fs);
875 id_t id2;
876
877 /*
878 * Make sure the FS-independent codes match the internal ones,
879 * so we can use the passed-in objtype without having to
880 * convert it explicitly to QL_BLOCK/QL_FILE.
881 */
882 CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
883 CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
884 CTASSERT(N_QL == 2);
885
886 if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
887 return EINVAL;
888 }
889
890 if (ump->um_quotas[qk->qk_idtype] == NULLVP)
891 return ENODEV;
892 if (qk->qk_id == QUOTA_DEFAULTID) {
893 mutex_enter(&lfs_dqlock);
894 error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
895 if (error) {
896 mutex_exit(&lfs_dqlock);
897 return error;
898 }
899 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
900 mutex_exit(&lfs_dqlock);
901 brelse(bp, 0);
902 q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
903 qk->qk_objtype, qv);
904 (void)id2;
905 } else
906 error = quota2_fetch_quotaval(ump, qk, qv);
907
908 return error;
909 }
910
911 /*
912 * Cursor structure we use.
913 *
914 * This will get stored in userland between calls so we must not assume
915 * it isn't arbitrarily corrupted.
916 */
917 struct ulfsq2_cursor {
918 uint32_t q2c_magic; /* magic number */
919 int q2c_hashsize; /* size of hash table at last go */
920
921 int q2c_users_done; /* true if we've returned all user data */
922 int q2c_groups_done; /* true if we've returned all group data */
923 int q2c_defaults_done; /* true if we've returned the default values */
924 int q2c_hashpos; /* slot to start at in hash table */
925 int q2c_uidpos; /* number of ids we've handled */
926 int q2c_blocks_done; /* true if we've returned the blocks value */
927 };
928
929 /*
930 * State of a single cursorget call, or at least the part of it that
931 * needs to be passed around.
932 */
933 struct q2cursor_state {
934 /* data return pointers */
935 struct quotakey *keys;
936 struct quotaval *vals;
937
938 /* key/value counters */
939 unsigned maxkeyvals;
940 unsigned numkeys; /* number of keys assigned */
941
942 /* ID to key/value conversion state */
943 int skipfirst; /* if true skip first key/value */
944 int skiplast; /* if true skip last key/value */
945
946 /* ID counters */
947 unsigned maxids; /* maximum number of IDs to handle */
948 unsigned numids; /* number of IDs handled */
949 };
950
951 /*
952 * Additional structure for getids callback.
953 */
954 struct q2cursor_getids {
955 struct q2cursor_state *state;
956 int idtype;
957 unsigned skip; /* number of ids to skip over */
958 unsigned new_skip; /* number of ids to skip over next time */
959 unsigned skipped; /* number skipped so far */
960 int stopped; /* true if we stopped quota_walk_list early */
961 };
962
963 /*
964 * Cursor-related functions
965 */
966
967 /* magic number */
968 #define Q2C_MAGIC (0xbeebe111)
969
970 /* extract cursor from caller form */
971 #define Q2CURSOR(qkc) ((struct ulfsq2_cursor *)&qkc->u.qkc_space[0])
972
973 /*
974 * Check that a cursor we're handed is at least plausibly valid. If
975 * someone munges it and it still passes these checks, they'll get
976 * partial or odd results back but won't break anything.
977 */
978 static int
979 q2cursor_check(struct ulfsq2_cursor *cursor)
980 {
981 if (cursor->q2c_magic != Q2C_MAGIC) {
982 return EINVAL;
983 }
984 if (cursor->q2c_hashsize < 0) {
985 return EINVAL;
986 }
987
988 if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
989 return EINVAL;
990 }
991 if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
992 return EINVAL;
993 }
994 if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
995 return EINVAL;
996 }
997 if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
998 return EINVAL;
999 }
1000 if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
1001 return EINVAL;
1002 }
1003 return 0;
1004 }
1005
1006 /*
1007 * Set up the q2cursor state.
1008 */
1009 static void
1010 q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
1011 struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
1012 {
1013 state->keys = keys;
1014 state->vals = vals;
1015
1016 state->maxkeyvals = maxkeyvals;
1017 state->numkeys = 0;
1018
1019 /*
1020 * For each ID there are two quotavals to return. If the
1021 * maximum number of entries to return is odd, we might want
1022 * to skip the first quotaval of the first ID, or the last
1023 * quotaval of the last ID, but not both. So the number of IDs
1024 * we want is (up to) half the number of return slots we have,
1025 * rounded up.
1026 */
1027
1028 state->maxids = (state->maxkeyvals + 1) / 2;
1029 state->numids = 0;
1030 if (state->maxkeyvals % 2) {
1031 if (blocks_done) {
1032 state->skipfirst = 1;
1033 state->skiplast = 0;
1034 } else {
1035 state->skipfirst = 0;
1036 state->skiplast = 1;
1037 }
1038 } else {
1039 state->skipfirst = 0;
1040 state->skiplast = 0;
1041 }
1042 }
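
/*
 * Worked example (numbers for illustration): with maxkeyvals = 5 there
 * is room for 5 key/value slots, so maxids = (5 + 1) / 2 = 3 IDs and one
 * half of one ID must be dropped.  If the previous call already returned
 * the blocks value of the ID we are restarting on (blocks_done), the
 * first ID only needs its files slot, so skipfirst is set; otherwise the
 * last ID gets only its blocks slot this time and skiplast is set.
 */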
1043
1044 /*
1045 * Choose which idtype we're going to work on. If doing a full
1046 * iteration, we do users first, then groups, but either might be
1047 * disabled or marked to skip via cursorskipidtype(), so don't make
1048 * silly assumptions.
1049 */
1050 static int
1051 q2cursor_pickidtype(struct ulfsq2_cursor *cursor, int *idtype_ret)
1052 {
1053 if (cursor->q2c_users_done == 0) {
1054 *idtype_ret = QUOTA_IDTYPE_USER;
1055 } else if (cursor->q2c_groups_done == 0) {
1056 *idtype_ret = QUOTA_IDTYPE_GROUP;
1057 } else {
1058 return EAGAIN;
1059 }
1060 return 0;
1061 }
1062
1063 /*
1064 * Add an ID to the current state. Sets up either one or two keys to
1065 * refer to it, depending on whether it's first/last and the setting
1066 * of skipfirst. (skiplast does not need to be explicitly tested)
1067 */
1068 static void
1069 q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
1070 {
1071 KASSERT(state->numids < state->maxids);
1072 KASSERT(state->numkeys < state->maxkeyvals);
1073
1074 if (!state->skipfirst || state->numkeys > 0) {
1075 state->keys[state->numkeys].qk_idtype = idtype;
1076 state->keys[state->numkeys].qk_id = id;
1077 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
1078 state->numkeys++;
1079 }
1080 if (state->numkeys < state->maxkeyvals) {
1081 state->keys[state->numkeys].qk_idtype = idtype;
1082 state->keys[state->numkeys].qk_id = id;
1083 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
1084 state->numkeys++;
1085 } else {
1086 KASSERT(state->skiplast);
1087 }
1088 state->numids++;
1089 }
1090
1091 /*
1092 * Callback function for getting IDs. Update counting and call addid.
1093 */
1094 static int
1095 q2cursor_getids_callback(struct ulfsmount *ump, uint64_t *offp,
1096 struct quota2_entry *q2ep, uint64_t off, void *v)
1097 {
1098 struct q2cursor_getids *gi = v;
1099 id_t id;
1100 #ifdef LFS_EI
1101 struct lfs *fs = ump->um_lfs;
1102 const int needswap = ULFS_MPNEEDSWAP(fs);
1103 #endif
1104
1105 if (gi->skipped < gi->skip) {
1106 gi->skipped++;
1107 return 0;
1108 }
1109 id = ulfs_rw32(q2ep->q2e_uid, needswap);
1110 q2cursor_addid(gi->state, gi->idtype, id);
1111 gi->new_skip++;
1112 if (gi->state->numids >= gi->state->maxids) {
1113 /* got enough ids, stop now */
1114 gi->stopped = 1;
1115 return Q2WL_ABORT;
1116 }
1117 return 0;
1118 }
1119
1120 /*
1121 * Fill in a batch of quotakeys by scanning one or more hash chains.
1122 */
1123 static int
1124 q2cursor_getkeys(struct ulfsmount *ump, int idtype, struct ulfsq2_cursor *cursor,
1125 struct q2cursor_state *state,
1126 int *hashsize_ret, struct quota2_entry *default_q2e_ret)
1127 {
1128 struct lfs *fs = ump->um_lfs;
1129 const int needswap = ULFS_MPNEEDSWAP(fs);
1130 struct buf *hbp;
1131 struct quota2_header *q2h;
1132 int quota2_hash_size;
1133 struct q2cursor_getids gi;
1134 uint64_t offset;
1135 int error;
1136
1137 /*
1138 * Read the header block.
1139 */
1140
1141 mutex_enter(&lfs_dqlock);
1142 error = getq2h(ump, idtype, &hbp, &q2h, 0);
1143 if (error) {
1144 mutex_exit(&lfs_dqlock);
1145 return error;
1146 }
1147
1148 /* if the table size has changed, make the caller start over */
1149 quota2_hash_size = ulfs_rw16(q2h->q2h_hash_size, needswap);
1150 if (cursor->q2c_hashsize == 0) {
1151 cursor->q2c_hashsize = quota2_hash_size;
1152 } else if (cursor->q2c_hashsize != quota2_hash_size) {
1153 error = EDEADLK;
1154 goto scanfail;
1155 }
1156
1157 /* grab the entry with the default values out of the header */
1158 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);
1159
1160 /* If we haven't done the defaults yet, that goes first. */
1161 if (cursor->q2c_defaults_done == 0) {
1162 q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
1163 /* if we read both halves, mark it done */
1164 if (state->numids < state->maxids || !state->skiplast) {
1165 cursor->q2c_defaults_done = 1;
1166 }
1167 }
1168
1169 gi.state = state;
1170 gi.idtype = idtype;
1171
1172 while (state->numids < state->maxids) {
1173 if (cursor->q2c_hashpos >= quota2_hash_size) {
1174 /* nothing more left */
1175 break;
1176 }
1177
1178 /* scan this hash chain */
1179 gi.skip = cursor->q2c_uidpos;
1180 gi.new_skip = gi.skip;
1181 gi.skipped = 0;
1182 gi.stopped = 0;
1183 offset = q2h->q2h_entries[cursor->q2c_hashpos];
1184
1185 error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
1186 q2cursor_getids_callback);
1187 KASSERT(error != Q2WL_ABORT);
1188 if (error) {
1189 break;
1190 }
1191 if (gi.stopped) {
1192 /* callback stopped before reading whole chain */
1193 cursor->q2c_uidpos = gi.new_skip;
1194 /* if we didn't get both halves, back up */
1195 if (state->numids == state->maxids && state->skiplast){
1196 KASSERT(cursor->q2c_uidpos > 0);
1197 cursor->q2c_uidpos--;
1198 }
1199 } else {
1200 /* read whole chain */
1201 /* if we got both halves of the last id, advance */
1202 if (state->numids < state->maxids || !state->skiplast){
1203 cursor->q2c_uidpos = 0;
1204 cursor->q2c_hashpos++;
1205 }
1206 }
1207 }
1208
1209 scanfail:
1210 mutex_exit(&lfs_dqlock);
1211 brelse(hbp, 0);
1212 if (error)
1213 return error;
1214
1215 *hashsize_ret = quota2_hash_size;
1216 return 0;
1217 }
1218
1219 /*
1220 * Fetch the quotavals for the quotakeys.
1221 */
1222 static int
1223 q2cursor_getvals(struct ulfsmount *ump, struct q2cursor_state *state,
1224 const struct quota2_entry *default_q2e)
1225 {
1226 int hasid;
1227 id_t loadedid, id;
1228 unsigned pos;
1229 struct quota2_entry q2e;
1230 int objtype;
1231 int error;
1232
1233 hasid = 0;
1234 loadedid = 0;
1235 for (pos = 0; pos < state->numkeys; pos++) {
1236 id = state->keys[pos].qk_id;
1237 if (!hasid || id != loadedid) {
1238 hasid = 1;
1239 loadedid = id;
1240 if (id == QUOTA_DEFAULTID) {
1241 q2e = *default_q2e;
1242 } else {
1243 error = quota2_fetch_q2e(ump,
1244 &state->keys[pos],
1245 &q2e);
1246 if (error == ENOENT) {
1247 /* something changed - start over */
1248 error = EDEADLK;
1249 }
1250 if (error) {
1251 return error;
1252 }
1253 }
1254 }
1255
1256
1257 objtype = state->keys[pos].qk_objtype;
1258 KASSERT(objtype >= 0 && objtype < N_QL);
1259 q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
1260 }
1261
1262 return 0;
1263 }
1264
1265 /*
1266 * Handle cursorget.
1267 *
1268 * We can't just read keys and values directly, because we can't walk
1269 * the list with dqlock held and grab dq_interlock to read the entries at
1270 * the same time. So we're going to do two passes: one to figure out
1271 * which IDs we want and fill in the keys, and then a second to use
1272 * the keys to fetch the values.
1273 */
1274 int
1275 lfsquota2_handle_cmd_cursorget(struct ulfsmount *ump, struct quotakcursor *qkc,
1276 struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
1277 unsigned *ret)
1278 {
1279 int error;
1280 struct ulfsq2_cursor *cursor;
1281 struct ulfsq2_cursor newcursor;
1282 struct q2cursor_state state;
1283 struct quota2_entry default_q2e;
1284 int idtype;
1285 int quota2_hash_size;
1286
1287 /*
1288 * Convert and validate the cursor.
1289 */
1290 cursor = Q2CURSOR(qkc);
1291 error = q2cursor_check(cursor);
1292 if (error) {
1293 return error;
1294 }
1295
1296 /*
1297 * Make sure our on-disk codes match the values of the
1298 * FS-independent ones. This avoids the need for explicit
1299 * conversion (which would be a NOP anyway and thus easily
1300 * left out or called in the wrong places...)
1301 */
1302 CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
1303 CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
1304 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
1305 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
1306
1307 /*
1308 * If some of the idtypes aren't configured/enabled, arrange
1309 * to skip over them.
1310 */
1311 if (cursor->q2c_users_done == 0 &&
1312 ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1313 cursor->q2c_users_done = 1;
1314 }
1315 if (cursor->q2c_groups_done == 0 &&
1316 ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1317 cursor->q2c_groups_done = 1;
1318 }
1319
1320 /* Loop over, potentially, both idtypes */
1321 while (1) {
1322
1323 /* Choose id type */
1324 error = q2cursor_pickidtype(cursor, &idtype);
1325 if (error == EAGAIN) {
1326 /* nothing more to do, return 0 */
1327 *ret = 0;
1328 return 0;
1329 }
1330 KASSERT(ump->um_quotas[idtype] != NULLVP);
1331
1332 /*
1333 * Initialize the per-call iteration state. Copy the
1334 * cursor state so we can update it in place but back
1335 * out on error.
1336 */
1337 q2cursor_initstate(&state, keys, vals, maxreturn,
1338 cursor->q2c_blocks_done);
1339 newcursor = *cursor;
1340
1341 /* Assign keys */
1342 error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
1343 &quota2_hash_size, &default_q2e);
1344 if (error) {
1345 return error;
1346 }
1347
1348 /* Now fill in the values. */
1349 error = q2cursor_getvals(ump, &state, &default_q2e);
1350 if (error) {
1351 return error;
1352 }
1353
1354 /*
1355 * Now that we aren't going to fail and lose what we
1356 * did so far, we can update the cursor state.
1357 */
1358
1359 if (newcursor.q2c_hashpos >= quota2_hash_size) {
1360 if (idtype == QUOTA_IDTYPE_USER)
1361 cursor->q2c_users_done = 1;
1362 else
1363 cursor->q2c_groups_done = 1;
1364
1365 /* start over on another id type */
1366 cursor->q2c_hashsize = 0;
1367 cursor->q2c_defaults_done = 0;
1368 cursor->q2c_hashpos = 0;
1369 cursor->q2c_uidpos = 0;
1370 cursor->q2c_blocks_done = 0;
1371 } else {
1372 *cursor = newcursor;
1373 cursor->q2c_blocks_done = state.skiplast;
1374 }
1375
1376 /*
1377 * If we have something to return, return it.
1378 * Otherwise, continue to the other idtype, if any,
1379 * and only return zero at end of iteration.
1380 */
1381 if (state.numkeys > 0) {
1382 break;
1383 }
1384 }
1385
1386 *ret = state.numkeys;
1387 return 0;
1388 }
1389
1390 int
1391 lfsquota2_handle_cmd_cursoropen(struct ulfsmount *ump, struct quotakcursor *qkc)
1392 {
1393 struct ulfsq2_cursor *cursor;
1394
1395 CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
1396 cursor = Q2CURSOR(qkc);
1397
1398 cursor->q2c_magic = Q2C_MAGIC;
1399 cursor->q2c_hashsize = 0;
1400
1401 cursor->q2c_users_done = 0;
1402 cursor->q2c_groups_done = 0;
1403 cursor->q2c_defaults_done = 0;
1404 cursor->q2c_hashpos = 0;
1405 cursor->q2c_uidpos = 0;
1406 cursor->q2c_blocks_done = 0;
1407 return 0;
1408 }
1409
1410 int
1411 lfsquota2_handle_cmd_cursorclose(struct ulfsmount *ump, struct quotakcursor *qkc)
1412 {
1413 struct ulfsq2_cursor *cursor;
1414 int error;
1415
1416 cursor = Q2CURSOR(qkc);
1417 error = q2cursor_check(cursor);
1418 if (error) {
1419 return error;
1420 }
1421
1422 /* nothing to do */
1423
1424 return 0;
1425 }
1426
1427 int
1428 lfsquota2_handle_cmd_cursorskipidtype(struct ulfsmount *ump,
1429 struct quotakcursor *qkc, int idtype)
1430 {
1431 struct ulfsq2_cursor *cursor;
1432 int error;
1433
1434 cursor = Q2CURSOR(qkc);
1435 error = q2cursor_check(cursor);
1436 if (error) {
1437 return error;
1438 }
1439
1440 switch (idtype) {
1441 case QUOTA_IDTYPE_USER:
1442 cursor->q2c_users_done = 1;
1443 break;
1444 case QUOTA_IDTYPE_GROUP:
1445 cursor->q2c_groups_done = 1;
1446 break;
1447 default:
1448 return EINVAL;
1449 }
1450
1451 return 0;
1452 }
1453
1454 int
1455 lfsquota2_handle_cmd_cursoratend(struct ulfsmount *ump, struct quotakcursor *qkc,
1456 int *ret)
1457 {
1458 struct ulfsq2_cursor *cursor;
1459 int error;
1460
1461 cursor = Q2CURSOR(qkc);
1462 error = q2cursor_check(cursor);
1463 if (error) {
1464 return error;
1465 }
1466
1467 *ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
1468 return 0;
1469 }
1470
1471 int
1472 lfsquota2_handle_cmd_cursorrewind(struct ulfsmount *ump, struct quotakcursor *qkc)
1473 {
1474 struct ulfsq2_cursor *cursor;
1475 int error;
1476
1477 cursor = Q2CURSOR(qkc);
1478 error = q2cursor_check(cursor);
1479 if (error) {
1480 return error;
1481 }
1482
1483 cursor->q2c_hashsize = 0;
1484
1485 cursor->q2c_users_done = 0;
1486 cursor->q2c_groups_done = 0;
1487 cursor->q2c_defaults_done = 0;
1488 cursor->q2c_hashpos = 0;
1489 cursor->q2c_uidpos = 0;
1490 cursor->q2c_blocks_done = 0;
1491
1492 return 0;
1493 }
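
/*
 * Sketch of how a caller (in practice the quotactl layer) drives the
 * cursor handlers above: open the cursor, call cursorget until
 * cursoratend reports completion, then close.  The function and variable
 * names below are illustrative only.
 */
#if 0
static int
example_iterate_quotas(struct ulfsmount *ump, struct quotakcursor *qkc,
    struct quotakey *keys, struct quotaval *vals, unsigned max)
{
	unsigned n;
	int atend, error;

	error = lfsquota2_handle_cmd_cursoropen(ump, qkc);
	if (error)
		return error;
	for (;;) {
		error = lfsquota2_handle_cmd_cursoratend(ump, qkc, &atend);
		if (error || atend)
			break;
		error = lfsquota2_handle_cmd_cursorget(ump, qkc, keys, vals,
		    max, &n);
		if (error)
			break;
		/* ... consume keys[0 .. n-1] / vals[0 .. n-1] ... */
	}
	(void)lfsquota2_handle_cmd_cursorclose(ump, qkc);
	return error;
}
#endif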
1494
1495 int
1496 lfs_q2sync(struct mount *mp)
1497 {
1498 return 0;
1499 }
1500
1501 struct dq2get_callback {
1502 uid_t id;
1503 struct dquot *dq;
1504 };
1505
1506 static int
1507 dq2get_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
1508 uint64_t off, void *v)
1509 {
1510 struct dq2get_callback *c = v;
1511 daddr_t lblkno;
1512 int blkoff;
1513 #ifdef LFS_EI
1514 struct lfs *fs = ump->um_lfs;
1515 const int needswap = ULFS_MPNEEDSWAP(fs);
1516 #endif
1517
1518 if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
1519 KASSERT(mutex_owned(&c->dq->dq_interlock));
1520 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
1521 blkoff = (off & ump->umq2_bmask);
1522 c->dq->dq2_lblkno = lblkno;
1523 c->dq->dq2_blkoff = blkoff;
1524 return Q2WL_ABORT;
1525 }
1526 return 0;
1527 }
1528
1529 int
1530 lfs_dq2get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
1531 struct dquot *dq)
1532 {
1533 struct buf *bp;
1534 struct quota2_header *q2h;
1535 int error;
1536 daddr_t offset;
1537 u_long hash_mask;
1538 struct dq2get_callback c = {
1539 .id = id,
1540 .dq = dq
1541 };
1542
1543 KASSERT(mutex_owned(&dq->dq_interlock));
1544 mutex_enter(&lfs_dqlock);
1545 error = getq2h(ump, type, &bp, &q2h, 0);
1546 if (error)
1547 goto out_mutex;
1548 /* look for our entry */
1549 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
1550 offset = q2h->q2h_entries[id & hash_mask];
1551 error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
1552 dq2get_callback);
1553 brelse(bp, 0);
1554 out_mutex:
1555 mutex_exit(&lfs_dqlock);
1556 return error;
1557 }
1558
1559 int
1560 lfs_dq2sync(struct vnode *vp, struct dquot *dq)
1561 {
1562 return 0;
1563 }
1564
1565 int
1566 lfs_quota2_mount(struct mount *mp)
1567 {
1568 struct ulfsmount *ump = VFSTOULFS(mp);
1569 struct lfs *fs = ump->um_lfs;
1570 int error = 0;
1571 struct vnode *vp;
1572 struct lwp *l = curlwp;
1573
1574 if ((fs->lfs_use_quota2) == 0)
1575 return 0;
1576
1577 fs->um_flags |= ULFS_QUOTA2;
1578 ump->umq2_bsize = fs->lfs_bsize;
1579 ump->umq2_bmask = fs->lfs_bmask;
1580 if (fs->lfs_quota_magic != Q2_HEAD_MAGIC) {
1581 printf("%s: Invalid quota magic number\n",
1582 mp->mnt_stat.f_mntonname);
1583 return EINVAL;
1584 }
1585 if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA)) &&
1586 fs->lfs_quotaino[ULFS_USRQUOTA] == 0) {
1587 printf("%s: no user quota inode\n",
1588 mp->mnt_stat.f_mntonname);
1589 error = EINVAL;
1590 }
1591 if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA)) &&
1592 fs->lfs_quotaino[ULFS_GRPQUOTA] == 0) {
1593 printf("%s: no group quota inode\n",
1594 mp->mnt_stat.f_mntonname);
1595 error = EINVAL;
1596 }
1597 if (error)
1598 return error;
1599
1600 if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA) &&
1601 ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1602 error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_USRQUOTA], &vp);
1603 if (error) {
1604 printf("%s: can't vget() user quota inode: %d\n",
1605 mp->mnt_stat.f_mntonname, error);
1606 return error;
1607 }
1608 ump->um_quotas[ULFS_USRQUOTA] = vp;
1609 ump->um_cred[ULFS_USRQUOTA] = l->l_cred;
1610 mutex_enter(vp->v_interlock);
1611 vp->v_writecount++;
1612 mutex_exit(vp->v_interlock);
1613 VOP_UNLOCK(vp);
1614 }
1615 if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA) &&
1616 ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1617 error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_GRPQUOTA], &vp);
1618 if (error) {
1619 vn_close(ump->um_quotas[ULFS_USRQUOTA],
1620 FREAD|FWRITE, l->l_cred);
1621 printf("%s: can't vget() group quota inode: %d\n",
1622 mp->mnt_stat.f_mntonname, error);
1623 return error;
1624 }
1625 ump->um_quotas[ULFS_GRPQUOTA] = vp;
1626 ump->um_cred[ULFS_GRPQUOTA] = l->l_cred;
1627 mutex_enter(vp->v_interlock);
1628 vp->v_vflag |= VV_SYSTEM;
1629 vp->v_writecount++;
1630 mutex_exit(vp->v_interlock);
1631 VOP_UNLOCK(vp);
1632 }
1633 mp->mnt_flag |= MNT_QUOTA;
1634 return 0;
1635 }
1636