1 /* $NetBSD: ufs_quota2.c,v 1.48 2026/01/22 03:24:19 riastradh Exp $ */ 2 /*- 3 * Copyright (c) 2010 Manuel Bouyer 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 16 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 17 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 19 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 
26 */ 27 28 #include <sys/cdefs.h> 29 __KERNEL_RCSID(0, "$NetBSD: ufs_quota2.c,v 1.48 2026/01/22 03:24:19 riastradh Exp $"); 30 31 #include <sys/param.h> 32 #include <sys/types.h> 33 34 #include <sys/buf.h> 35 #include <sys/file.h> 36 #include <sys/kauth.h> 37 #include <sys/kernel.h> 38 #include <sys/mount.h> 39 #include <sys/namei.h> 40 #include <sys/proc.h> 41 #include <sys/quota.h> 42 #include <sys/quotactl.h> 43 #include <sys/sdt.h> 44 #include <sys/systm.h> 45 #include <sys/vnode.h> 46 #include <sys/wapbl.h> 47 48 #include <ufs/ufs/inode.h> 49 #include <ufs/ufs/quota2.h> 50 #include <ufs/ufs/ufs_bswap.h> 51 #include <ufs/ufs/ufs_extern.h> 52 #include <ufs/ufs/ufs_quota.h> 53 #include <ufs/ufs/ufs_wapbl.h> 54 #include <ufs/ufs/ufsmount.h> 55 56 /* 57 * LOCKING: 58 * Data in the entries are protected by the associated struct dquot's 59 * dq_interlock (this means we can't read or change a quota entry without 60 * grabbing a dquot for it). 61 * The header and lists (including pointers in the data entries, and q2e_uid) 62 * are protected by the global dqlock. 
 * the locking order is dq_interlock -> dqlock
 */

static int quota2_bwrite(struct mount *, struct buf *);
static int getinoquota2(struct inode *, bool, bool, struct buf **,
    struct quota2_entry **);
static int getq2h(struct ufsmount *, int, struct buf **,
    struct quota2_header **, int);
static int getq2e(struct ufsmount *, int, daddr_t, int, struct buf **,
    struct quota2_entry **, int);
static int quota2_walk_list(struct ufsmount *, struct buf *, int,
    uint64_t *, int, void *,
    int (*func)(struct ufsmount *, uint64_t *, struct quota2_entry *,
	uint64_t, void *));

/* Printable names for the QL_BLOCK/QL_FILE limit types (uprintf messages) */
static const char *limnames[] = INITQLNAMES;

/*
 * Copy the limits (hard, soft, grace) for one object type from the
 * FS-independent quotaval into an on-disk quota2 entry.  The current
 * usage and expiration time fields of q2e are left untouched.
 */
static void
quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
    struct quota2_entry *q2e)
{
	/* make sure we can index q2e_val[] by the fs-independent objtype */
	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

	q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
	q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
	q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
}

/*
 * Convert internal representation to FS-independent representation.
 * (Note that while the two types are currently identical, the
 * internal representation is an on-disk struct and the FS-independent
 * representation is not, and they might diverge in the future.)
 */
static void
q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
{
	qv->qv_softlimit = q2v->q2v_softlimit;
	qv->qv_hardlimit = q2v->q2v_hardlimit;
	qv->qv_usage = q2v->q2v_cur;
	qv->qv_expiretime = q2v->q2v_time;
	qv->qv_grace = q2v->q2v_grace;
}

/*
 * Convert a quota2entry and default-flag to the FS-independent
 * representation.
 */
static void
q2e_to_quotaval(struct quota2_entry *q2e, int def,
    id_t *id, int objtype, struct quotaval *ret)
{
	/* default entries have no real uid; report the reserved id */
	if (def) {
		*id = QUOTA_DEFAULTID;
	} else {
		*id = q2e->q2e_uid;
	}

	KASSERT(objtype >= 0 && objtype < N_QL);
	q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
}


/*
 * Write back a quota buffer: synchronously when the file system is
 * mounted MNT_SYNCHRONOUS, otherwise as a delayed write.  Consumes bp.
 */
static int
quota2_bwrite(struct mount *mp, struct buf *bp)
{
	if (mp->mnt_flag & MNT_SYNCHRONOUS)
		return bwrite(bp);
	else {
		bdwrite(bp);
		return 0;
	}
}

/*
 * Read and validate the quota file header block for the given quota
 * type.  On success returns the held buffer in *bpp and a pointer into
 * its data in *q2hp; the caller must release the buffer.  Caller must
 * hold dqlock.
 */
static int
getq2h(struct ufsmount *ump, int type,
    struct buf **bpp, struct quota2_header **q2hp, int flags)
{
	const int needswap = UFS_MPNEEDSWAP(ump);
	int error;
	struct buf *bp;
	struct quota2_header *q2h;

	KASSERT(mutex_owned(&dqlock));
	error = bread(ump->um_quotas[type], 0, ump->umq2_bsize,
	    flags, &bp);
	if (error)
		return error;
	if (bp->b_resid != 0)
		panic("dq2get: %s quota file truncated", quotatypes[type]);

	q2h = (void *)bp->b_data;
	/* sanity-check the on-disk header before handing it out */
	if (ufs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
	    q2h->q2h_type != type)
		panic("dq2get: corrupted %s quota header", quotatypes[type]);
	*bpp = bp;
	*q2hp = q2h;
	return 0;
}

/*
 * Read the quota file block containing one on-disk quota entry and
 * return a pointer to the entry at blkoffset within it.  On success
 * the held buffer is returned in *bpp; the caller must release it.
 */
static int
getq2e(struct ufsmount *ump, int type, daddr_t lblkno, int blkoffset,
    struct buf **bpp, struct quota2_entry **q2ep, int flags)
{
	int error;
	struct buf *bp;

	/* entries are 64-bit aligned on disk; anything else is corruption */
	if (blkoffset & (sizeof(uint64_t) - 1)) {
		panic("dq2get: %s quota file corrupted",
		    quotatypes[type]);
	}
	error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize,
	    flags, &bp);
	if (error)
		return error;
	if (bp->b_resid != 0) {
		panic("dq2get: %s quota file corrupted",
		    quotatypes[type]);
	}
	*q2ep = (void *)((char *)bp->b_data + blkoffset);
	*bpp = bp;
	return 0;
}

/* walk a quota entry list, calling the callback for each entry */
#define Q2WL_ABORT 0x10000000

/*
 * Walk one on-disk linked list of quota entries starting at *offp,
 * calling func for each entry.  The callback may modify *offp (to
 * unlink the current entry) or return Q2WL_ABORT to stop the walk.
 * Buffers other than the caller-supplied header buffer hbp are read
 * here and written back (if B_MODIFY) or released before returning.
 * Caller must hold dqlock.
 */
static int
quota2_walk_list(struct ufsmount *ump, struct buf *hbp, int type,
    uint64_t *offp, int flags, void *a,
    int (*func)(struct ufsmount *, uint64_t *, struct quota2_entry *, uint64_t,
	void *))
{
	const int needswap = UFS_MPNEEDSWAP(ump);
	daddr_t off = ufs_rw64(*offp, needswap);
	struct buf *bp, *obp = hbp;
	int ret = 0, ret2 = 0;
	struct quota2_entry *q2e;
	daddr_t lblkno, blkoff, olblkno = 0;

	KASSERT(mutex_owned(&dqlock));

	while (off != 0) {
		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
		blkoff = (off & ump->umq2_bmask);
		if (lblkno == 0) {
			/* in the header block */
			bp = hbp;
		} else if (lblkno == olblkno) {
			/* still in the same buf */
			bp = obp;
		} else {
			ret = bread(ump->um_quotas[type], lblkno,
			    ump->umq2_bsize, flags, &bp);
			if (ret)
				return ret;
			if (bp->b_resid != 0) {
				panic("%s: %s quota file corrupted",
				    __func__, quotatypes[type]);
			}
		}
		q2e = (void *)((char *)(bp->b_data) + blkoff);
		ret = (*func)(ump, offp, q2e, off, a);
		if (off != ufs_rw64(*offp, needswap)) {
			/* callback changed parent's pointer, redo */
			off = ufs_rw64(*offp, needswap);
			if (bp != hbp && bp != obp)
				ret2 = bwrite(bp);
		} else {
			/* parent is now current */
			if (obp != bp && obp != hbp) {
				if (flags & B_MODIFY)
					ret2 = bwrite(obp);
				else
					brelse(obp, 0);
			}
			obp = bp;
			olblkno = lblkno;
			offp = &(q2e->q2e_next);
			off = ufs_rw64(*offp, needswap);
		}
		if (ret)
			break;
		if (ret2) {
			ret = ret2;
			break;
		}
	}
	/* release (or write back) the last non-header buffer we held */
	if (obp != hbp) {
		if (flags & B_MODIFY)
			ret2 = bwrite(obp);
		else
			brelse(obp, 0);
	}
	/* Q2WL_ABORT is an early-stop request from the callback, not an error */
	if (ret & Q2WL_ABORT)
		return 0;
	if (ret == 0)
		return ret2;
	return ret;
}

/*
 * Close the quota file vnodes at unmount time.
 */
int
quota2_umount(struct mount *mp, int flags)
{
	int i, error;
	struct ufsmount *ump = VFSTOUFS(mp);

	if ((ump->um_flags & UFS_QUOTA2) == 0)
		return 0;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (ump->um_quotas[i] != NULLVP) {
			error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
			    ump->um_cred[i]);
			if (error) {
				printf("quota2_umount failed: close(%p) %d\n",
				    ump->um_quotas[i], error);
				return error;
			}
		}
		ump->um_quotas[i] = NULLVP;
	}
	return 0;
}

/*
 * Allocate an on-disk quota entry for (type, uid) and hook it into the
 * hash chain, recording its location in dq.  Takes an entry from the
 * header's free list, growing the quota file by one block when the
 * free list is empty.  Caller must hold both dq->dq_interlock and
 * dqlock.
 */
static int
quota2_q2ealloc(struct ufsmount *ump, int type, uid_t uid, struct dquot *dq)
{
	int error, error2;
	struct buf *hbp, *bp;
	struct quota2_header *q2h;
	struct quota2_entry *q2e;
	daddr_t offset;
	u_long hash_mask;
	const int needswap = UFS_MPNEEDSWAP(ump);

	KASSERT(mutex_owned(&dq->dq_interlock));
	KASSERT(mutex_owned(&dqlock));
	error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
	if (error)
		return error;
	offset = ufs_rw64(q2h->q2h_free, needswap);
	if (offset == 0) {
		struct vnode *vp = ump->um_quotas[type];
		struct inode *ip = VTOI(vp);
		uint64_t size = ip->i_size;
		/* need to allocate a new disk block */
		error = UFS_BALLOC(vp, size, ump->umq2_bsize,
		    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
		if (error) {
			brelse(hbp, 0);
			return error;
		}
		KASSERT((ip->i_size % ump->umq2_bsize) == 0);
		ip->i_size += ump->umq2_bsize;
		DIP_ASSIGN(ip, size, ip->i_size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		uvm_vnp_setsize(vp, ip->i_size);
		/* carve the new block into free entries chained off q2h */
		quota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
		    needswap);
		error = bwrite(bp);
		error2 = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
		if (error || error2) {
			brelse(hbp, 0);
			if (error)
				return error;
			return error2;
		}
		offset = ufs_rw64(q2h->q2h_free, needswap);
		KASSERT(offset != 0);
	}
	dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
	dq->dq2_blkoff = (offset & ump->umq2_bmask);
	if (dq->dq2_lblkno == 0) {
		/* the entry lives in the header block itself */
		bp = hbp;
		q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
	} else {
		error = getq2e(ump, type, dq->dq2_lblkno,
		    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
		if (error) {
			brelse(hbp, 0);
			return error;
		}
	}
	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	/* remove from free list */
	q2h->q2h_free = q2e->q2e_next;

	/* initialize the new entry from the default entry */
	memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
	q2e->q2e_uid = ufs_rw32(uid, needswap);
	/* insert in hash list */
	q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
	q2h->q2h_entries[uid & hash_mask] = ufs_rw64(offset, needswap);
	if (hbp != bp) {
		bwrite(hbp);
	}
	bwrite(bp);
	return 0;
}

/*
 * Look up (and, if alloc is true, create) the on-disk quota entries
 * for all quota types of this inode.  On success the entries are
 * returned via bpp/q2ep (NULL slots for missing dquots) with each
 * dquot's dq_interlock held; the caller releases the buffers and
 * drops the interlocks.
 */
static int
getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
    struct quota2_entry **q2ep)
{
	int error;
	int i;
	struct dquot *dq;
	struct ufsmount *ump = ip->i_ump;
	u_int32_t ino_ids[MAXQUOTAS];

	error = getinoquota(ip);
	if (error)
		return error;

	if (alloc) {
		UFS_WAPBL_JLOCK_ASSERT(ump->um_mountp);
	}
	ino_ids[USRQUOTA] = ip->i_uid;
	ino_ids[GRPQUOTA] = ip->i_gid;
	/* first get the interlock for all dquot */
	for (i = 0; i < MAXQUOTAS; i++) {
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		mutex_enter(&dq->dq_interlock);
	}
	/* now get the corresponding quota entry */
	for (i = 0; i < MAXQUOTAS; i++) {
		bpp[i] = NULL;
		q2ep[i] = NULL;
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		if (__predict_false(ump->um_quotas[i] == NULL)) {
			/*
			 * quotas have been turned off. This can happen
			 * at umount time.
			 */
			mutex_exit(&dq->dq_interlock);
			dqrele(NULLVP, dq);
			ip->i_dquot[i] = NULL;
			continue;
		}

		if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
			if (!alloc) {
				continue;
			}
			/* need to alloc a new on-disk quot */
			mutex_enter(&dqlock);
			error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
			mutex_exit(&dqlock);
			/*
			 * NOTE(review): the error returns below leave the
			 * dq_interlocks taken above held and any buffers
			 * already obtained unreleased — looks like callers
			 * are expected not to recover from this; confirm.
			 */
			if (error)
				return error;
		}
		KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
		error = getq2e(ump, i, dq->dq2_lblkno,
		    dq->dq2_blkoff, &bpp[i], &q2ep[i],
		    modify ? B_MODIFY : 0);
		if (error)
			return error;
	}
	return 0;
}

/*
 * Check a proposed usage change against the limits in q2v; thin
 * wrapper around the FS-independent quota_check_limit().
 */
__inline static int __unused
quota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
{
	return quota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
	    q2v->q2v_hardlimit, q2v->q2v_time, now);
}

/*
 * Apply a block or inode usage change to all quota types of an inode,
 * enforcing limits (unless FORCE or the credential is exempt) when the
 * change is an allocation.  Returns EDQUOT when a limit would be
 * exceeded.
 */
static int
quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
    int flags)
{
	int error;
	struct buf *bp[MAXQUOTAS];
	struct quota2_entry *q2e[MAXQUOTAS];
	struct quota2_val *q2vp;
	struct dquot *dq;
	uint64_t ncurblks;
	struct ufsmount *ump = ip->i_ump;
	struct mount *mp = ump->um_mountp;
	const int needswap = UFS_MPNEEDSWAP(ump);
	int i;

	if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
		return error;
	if (change == 0) {
		/* nothing to account; just release buffers and locks */
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = ip->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			if (bp[i])
				brelse(bp[i], 0);
			mutex_exit(&dq->dq_interlock);
		}
		return 0;
	}
	if (change < 0) {
		/* freeing: never fails, clamp usage at zero */
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = ip->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			if (q2e[i] == NULL) {
				mutex_exit(&dq->dq_interlock);
				continue;
			}
			q2vp = &q2e[i]->q2e_val[vtype];
			ncurblks = ufs_rw64(q2vp->q2v_cur, needswap);
			if (ncurblks < -change)
				ncurblks = 0;
			else
				ncurblks += change;
			q2vp->q2v_cur = ufs_rw64(ncurblks,
			    needswap);
			quota2_bwrite(mp, bp[i]);
			mutex_exit(&dq->dq_interlock);
		}
		return 0;
	}
	/* see if the allocation is allowed */
	for (i = 0; i < MAXQUOTAS; i++) {
		struct quota2_val q2v;
		int ql_stat;
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		KASSERT(q2e[i] != NULL);
		quota2_ufs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
		ql_stat = quota2_check_limit(&q2v, change, time_second);

		/* exempt credentials (and FORCE) bypass enforcement */
		if ((flags & FORCE) == 0 &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
		    KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
			/* enforce this limit */
			switch(QL_STATUS(ql_stat)) {
			case QL_S_DENY_HARD:
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: write failed, %s %s "
					    "limit reached\n",
					    mp->mnt_stat.f_mntonname,
					    quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				error = SET_ERROR(EDQUOT);
				break;
			case QL_S_DENY_GRACE:
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: write failed, %s %s "
					    "limit reached\n",
					    mp->mnt_stat.f_mntonname,
					    quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				error = SET_ERROR(EDQUOT);
				break;
			case QL_S_ALLOW_SOFT:
				/* over soft limit: warn once but allow */
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: warning, %s %s "
					    "quota exceeded\n",
					    mp->mnt_stat.f_mntonname,
					    quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				break;
			}
		}
		/*
		 * always do this; we don't know if the allocation will
		 * succeed or not in the end. if we don't do the allocation
		 * q2v_time will be ignored anyway
		 */
		if (ql_stat & QL_F_CROSS) {
			/* crossing the soft limit starts the grace period */
			q2v.q2v_time = time_second + q2v.q2v_grace;
			quota2_ufs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
			    needswap);
		}
	}

	/* now do the allocation if allowed */
	for (i = 0; i < MAXQUOTAS; i++) {
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		KASSERT(q2e[i] != NULL);
		if (error == 0) {
			q2vp = &q2e[i]->q2e_val[vtype];
			ncurblks = ufs_rw64(q2vp->q2v_cur, needswap);
			q2vp->q2v_cur = ufs_rw64(ncurblks + change, needswap);
			quota2_bwrite(mp, bp[i]);
		} else
			brelse(bp[i], 0);
		mutex_exit(&dq->dq_interlock);
	}
	return error;
}

/*
 * Account a block-count change against the inode's quotas.
 */
int
chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
{
	return quota2_check(ip, QL_BLOCK, change, cred, flags);
}

/*
 * Account an inode-count change against the inode's quotas.
 */
int
chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
{
	return quota2_check(ip, QL_FILE, change, cred, flags);
}

/*
 * Set the quota limits for one (idtype, id, objtype) key, creating the
 * on-disk entry if needed.  QUOTA_DEFAULTID updates the default entry
 * in the quota file header instead.
 */
int
quota2_handle_cmd_put(struct ufsmount *ump, const struct quotakey *key,
    const struct quotaval *val)
{
	int error;
	struct dquot *dq;
	struct quota2_header *q2h;
	struct quota2_entry q2e, *q2ep;
	struct buf *bp;
	const int needswap = UFS_MPNEEDSWAP(ump);

	/* make sure we can index by the fs-independent idtype */
	CTASSERT(QUOTA_IDTYPE_USER == USRQUOTA);
	CTASSERT(QUOTA_IDTYPE_GROUP == GRPQUOTA);

	if (ump->um_quotas[key->qk_idtype] == NULLVP)
		return SET_ERROR(ENODEV);
	error = UFS_WAPBL_BEGIN(ump->um_mountp);
	if (error)
		return error;

	if (key->qk_id == QUOTA_DEFAULTID) {
		/* update the default entry stored in the header block */
		mutex_enter(&dqlock);
		error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
		if (error) {
			mutex_exit(&dqlock);
			goto out_wapbl;
		}
		quota2_ufs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
		quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
		quota2_ufs_rwq2e(&q2e,
		    &q2h->q2h_defentry, needswap);
		mutex_exit(&dqlock);
		quota2_bwrite(ump->um_mountp, bp);
		goto out_wapbl;
	}

	error = dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
	if (error)
		goto out_wapbl;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* need to alloc a new on-disk quot */
		mutex_enter(&dqlock);
		error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
		mutex_exit(&dqlock);
		if (error)
			goto out_il;
	}
	KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
	error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
	    dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
	if (error)
		goto out_il;

	quota2_ufs_rwq2e(q2ep, &q2e, needswap);
	/*
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	if (val->qv_softlimit &&
	    q2e.q2e_val[key->qk_objtype].q2v_cur >= val->qv_softlimit &&
	    (q2e.q2e_val[key->qk_objtype].q2v_softlimit == 0 ||
	     (q2e.q2e_val[key->qk_objtype].q2v_cur <
	      q2e.q2e_val[key->qk_objtype].q2v_softlimit))) {
		q2e.q2e_val[key->qk_objtype].q2v_time =
		    time_second + val->qv_grace;
	}
	quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
	quota2_ufs_rwq2e(&q2e, q2ep, needswap);
	quota2_bwrite(ump->um_mountp, bp);

 out_il:
	mutex_exit(&dq->dq_interlock);
	dqrele(NULLVP, dq);
 out_wapbl:
	UFS_WAPBL_END(ump->um_mountp);
	return error;
}

/* context passed to dq2clear_callback through quota2_walk_list() */
struct dq2clear_callback {
	uid_t id;		/* id whose entry should be freed */
	struct dquot *dq;	/* in-core dquot to disassociate */
	struct quota2_header *q2h;	/* header owning the free list */
};

/*
 * quota2_walk_list() callback: when the entry for c->id is found,
 * unlink it from its hash chain, put it on the free list, and clear
 * the dquot's on-disk location.  Aborts the walk once done.
 */
static int
dq2clear_callback(struct ufsmount *ump, uint64_t *offp,
    struct quota2_entry *q2e,
    uint64_t off, void *v)
{
	struct dq2clear_callback *c = v;
	const int needswap = UFS_MPNEEDSWAP(ump);
	uint64_t myoff;

	if (ufs_rw32(q2e->q2e_uid, needswap) == c->id) {
		KASSERT(mutex_owned(&c->dq->dq_interlock));
		c->dq->dq2_lblkno = 0;
		c->dq->dq2_blkoff = 0;
		myoff = *offp;
		/* remove from hash list */
		*offp = q2e->q2e_next;
		/* add to free list */
		q2e->q2e_next = c->q2h->q2h_free;
		c->q2h->q2h_free = myoff;
		return Q2WL_ABORT;
	}
	return 0;
}

/*
 * Reset one (idtype, id, objtype)'s limits to the defaults; if the
 * entry then carries no information at all, return it to the quota
 * file's free list.
 */
int
quota2_handle_cmd_del(struct ufsmount *ump, const struct quotakey *qk)
{
	int idtype;
	id_t id;
	int objtype;
	int error, i, canfree;
	struct dquot *dq;
	struct quota2_header *q2h;
	struct quota2_entry q2e, *q2ep;
	struct buf *hbp, *bp;
	u_long hash_mask;
	struct dq2clear_callback c;

	idtype = qk->qk_idtype;
	id = qk->qk_id;
	objtype = qk->qk_objtype;

	if (ump->um_quotas[idtype] == NULLVP)
		return SET_ERROR(ENODEV);
	if (id == QUOTA_DEFAULTID)
		return SET_ERROR(EOPNOTSUPP);

	/* get the default entry before locking the entry's buffer */
	mutex_enter(&dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error) {
		mutex_exit(&dqlock);
		return error;
	}
	/* we'll copy to another disk entry, so no need to swap */
	memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
	mutex_exit(&dqlock);
	brelse(hbp, 0);

	error = dqget(NULLVP, id, ump, idtype, &dq);
	if (error)
		return error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* already clear, nothing to do */
		error = SET_ERROR(ENOENT);
		goto out_il;
	}
	error = UFS_WAPBL_BEGIN(ump->um_mountp);
	if (error)
		goto out_dq;

	error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
	    &bp, &q2ep, B_MODIFY);
	if (error)
		goto out_wapbl;

	/* make sure we can index by the objtype passed in */
	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

	/* clear the requested objtype by copying from the default entry */
	q2ep->q2e_val[objtype].q2v_softlimit =
	    q2e.q2e_val[objtype].q2v_softlimit;
	q2ep->q2e_val[objtype].q2v_hardlimit =
	    q2e.q2e_val[objtype].q2v_hardlimit;
	q2ep->q2e_val[objtype].q2v_grace =
	    q2e.q2e_val[objtype].q2v_grace;
	q2ep->q2e_val[objtype].q2v_time = 0;

	/* if this entry now contains no information, we can free it */
	canfree = 1;
	for (i = 0; i < N_QL; i++) {
		if (q2ep->q2e_val[i].q2v_cur != 0 ||
		    (q2ep->q2e_val[i].q2v_softlimit !=
		     q2e.q2e_val[i].q2v_softlimit) ||
		    (q2ep->q2e_val[i].q2v_hardlimit !=
		     q2e.q2e_val[i].q2v_hardlimit) ||
		    (q2ep->q2e_val[i].q2v_grace !=
		     q2e.q2e_val[i].q2v_grace)) {
			canfree = 0;
			break;
		}
		/* note: do not need to check q2v_time */
	}

	if (canfree == 0) {
		quota2_bwrite(ump->um_mountp, bp);
		goto out_wapbl;
	}
	/* we can free it. release bp so we can walk the list */
	brelse(bp, 0);
	mutex_enter(&dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error)
		goto out_dqlock;

	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	c.dq = dq;
	c.id = id;
	c.q2h = q2h;
	/* walk the hash chain; dq2clear_callback unlinks and frees */
	error = quota2_walk_list(ump, hbp, idtype,
	    &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
	    dq2clear_callback);

	bwrite(hbp);

 out_dqlock:
	mutex_exit(&dqlock);
 out_wapbl:
	UFS_WAPBL_END(ump->um_mountp);
 out_il:
	mutex_exit(&dq->dq_interlock);
 out_dq:
	dqrele(NULLVP, dq);
	return error;
}

/*
 * Fetch the on-disk quota entry for one key (byte-swapped into *ret).
 * Returns ENOENT if the id has no on-disk entry.
 */
static int
quota2_fetch_q2e(struct ufsmount *ump, const struct quotakey *qk,
    struct quota2_entry *ret)
{
	struct dquot *dq;
	int error;
	struct quota2_entry *q2ep;
	struct buf *bp;
	const int needswap = UFS_MPNEEDSWAP(ump);

	error = dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
	if (error)
		return error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		mutex_exit(&dq->dq_interlock);
		dqrele(NULLVP, dq);
		return SET_ERROR(ENOENT);
	}
	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno,
	    dq->dq2_blkoff,
	    &bp, &q2ep, 0);
	if (error) {
		mutex_exit(&dq->dq_interlock);
		dqrele(NULLVP, dq);
		return error;
	}
	quota2_ufs_rwq2e(q2ep, ret, needswap);
	brelse(bp, 0);
	mutex_exit(&dq->dq_interlock);
	dqrele(NULLVP, dq);

	return 0;
}

/*
 * Fetch one quotaval (one objtype of one id) from the quota file.
 * Returns ENOENT if the id has no on-disk entry.
 */
static int
quota2_fetch_quotaval(struct ufsmount *ump, const struct quotakey *qk,
    struct quotaval *ret)
{
	struct dquot *dq;
	int error;
	struct quota2_entry *q2ep, q2e;
	struct buf *bp;
	const int needswap = UFS_MPNEEDSWAP(ump);
	id_t id2;

	error = dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
	if (error)
		return error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		mutex_exit(&dq->dq_interlock);
		dqrele(NULLVP, dq);
		return SET_ERROR(ENOENT);
	}
	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
	    &bp, &q2ep, 0);
	if (error) {
		mutex_exit(&dq->dq_interlock);
		dqrele(NULLVP, dq);
		return error;
	}
	/* copy out (swapped) before dropping the buffer and locks */
	quota2_ufs_rwq2e(q2ep, &q2e, needswap);
	brelse(bp, 0);
	mutex_exit(&dq->dq_interlock);
	dqrele(NULLVP, dq);

	q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
	KASSERT(id2 == qk->qk_id);
	return 0;
}

/*
 * Handle the quotactl "get" operation: return the quotaval for one
 * key.  QUOTA_DEFAULTID reads the default entry from the header.
 */
int
quota2_handle_cmd_get(struct ufsmount *ump, const struct quotakey *qk,
    struct quotaval *qv)
{
	int error;
	struct quota2_header *q2h;
	struct quota2_entry q2e;
	struct buf *bp;
	const int needswap = UFS_MPNEEDSWAP(ump);
	id_t id2;

	/*
	 * Make sure the FS-independent codes match the internal ones,
	 * so we can use the passed-in objtype without having to
	 * convert it explicitly to QL_BLOCK/QL_FILE.
	 */
	CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
	CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
	CTASSERT(N_QL == 2);

	if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
		return SET_ERROR(EINVAL);
	}

	if (ump->um_quotas[qk->qk_idtype] == NULLVP)
		return SET_ERROR(ENODEV);
	if (qk->qk_id == QUOTA_DEFAULTID) {
		mutex_enter(&dqlock);
		error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
		if (error) {
			mutex_exit(&dqlock);
			return error;
		}
		quota2_ufs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
		mutex_exit(&dqlock);
		brelse(bp, 0);
		q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
		    qk->qk_objtype, qv);
		(void)id2;
	} else
		error = quota2_fetch_quotaval(ump, qk, qv);

	return error;
}

/*
 * Cursor structure we used.
 *
 * This will get stored in userland between calls so we must not assume
 * it isn't arbitrarily corrupted.
 */
struct ufsq2_cursor {
	uint32_t q2c_magic;	/* magic number */
	int q2c_hashsize;	/* size of hash table at last go */

	int q2c_users_done;	/* true if we've returned all user data */
	int q2c_groups_done;	/* true if we've returned all group data */
	int q2c_defaults_done;	/* true if we've returned the default values */
	int q2c_hashpos;	/* slot to start at in hash table */
	int q2c_uidpos;		/* number of ids we've handled */
	int q2c_blocks_done;	/* true if we've returned the blocks value */
};

/*
 * State of a single cursorget call, or at least the part of it that
 * needs to be passed around.
 */
struct q2cursor_state {
	/* data return pointers */
	struct quotakey *keys;
	struct quotaval *vals;

	/* key/value counters */
	unsigned maxkeyvals;
	unsigned numkeys;	/* number of keys assigned */

	/* ID to key/value conversion state */
	int skipfirst;		/* if true skip first key/value */
	int skiplast;		/* if true skip last key/value */

	/* ID counters */
	unsigned maxids;	/* maximum number of IDs to handle */
	unsigned numids;	/* number of IDs handled */
};

/*
 * Additional structure for getids callback.
 */
struct q2cursor_getids {
	struct q2cursor_state *state;
	int idtype;
	unsigned skip;		/* number of ids to skip over */
	unsigned new_skip;	/* number of ids to skip over next time */
	unsigned skipped;	/* number skipped so far */
	int stopped;		/* true if we stopped quota_walk_list early */
};

/*
 * Cursor-related functions
 */

/* magic number */
#define Q2C_MAGIC (0xbeebe111)

/* extract cursor from caller form */
#define Q2CURSOR(qkc) ((struct ufsq2_cursor *)&qkc->u.qkc_space[0])

/*
 * Check that a cursor we're handed is something like valid. If
 * someone munges it and it still passes these checks, they'll get
 * partial or odd results back but won't break anything.
984 */ 985 static int 986 q2cursor_check(struct ufsq2_cursor *cursor) 987 { 988 if (cursor->q2c_magic != Q2C_MAGIC) { 989 return SET_ERROR(EINVAL); 990 } 991 if (cursor->q2c_hashsize < 0) { 992 return SET_ERROR(EINVAL); 993 } 994 995 if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) { 996 return SET_ERROR(EINVAL); 997 } 998 if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) { 999 return SET_ERROR(EINVAL); 1000 } 1001 if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) { 1002 return SET_ERROR(EINVAL); 1003 } 1004 if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) { 1005 return SET_ERROR(EINVAL); 1006 } 1007 if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) { 1008 return SET_ERROR(EINVAL); 1009 } 1010 return 0; 1011 } 1012 1013 /* 1014 * Set up the q2cursor state. 1015 */ 1016 static void 1017 q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys, 1018 struct quotaval *vals, unsigned maxkeyvals, int blocks_done) 1019 { 1020 state->keys = keys; 1021 state->vals = vals; 1022 1023 state->maxkeyvals = maxkeyvals; 1024 state->numkeys = 0; 1025 1026 /* 1027 * For each ID there are two quotavals to return. If the 1028 * maximum number of entries to return is odd, we might want 1029 * to skip the first quotaval of the first ID, or the last 1030 * quotaval of the last ID, but not both. So the number of IDs 1031 * we want is (up to) half the number of return slots we have, 1032 * rounded up. 1033 */ 1034 1035 state->maxids = (state->maxkeyvals + 1) / 2; 1036 state->numids = 0; 1037 if (state->maxkeyvals % 2) { 1038 if (blocks_done) { 1039 state->skipfirst = 1; 1040 state->skiplast = 0; 1041 } else { 1042 state->skipfirst = 0; 1043 state->skiplast = 1; 1044 } 1045 } else { 1046 state->skipfirst = 0; 1047 state->skiplast = 0; 1048 } 1049 } 1050 1051 /* 1052 * Choose which idtype we're going to work on. 
If doing a full 1053 * iteration, we do users first, then groups, but either might be 1054 * disabled or marked to skip via cursorsetidtype(), so don't make 1055 * silly assumptions. 1056 */ 1057 static int 1058 q2cursor_pickidtype(struct ufsq2_cursor *cursor, int *idtype_ret) 1059 { 1060 if (cursor->q2c_users_done == 0) { 1061 *idtype_ret = QUOTA_IDTYPE_USER; 1062 } else if (cursor->q2c_groups_done == 0) { 1063 *idtype_ret = QUOTA_IDTYPE_GROUP; 1064 } else { 1065 return SET_ERROR(EAGAIN); 1066 } 1067 return 0; 1068 } 1069 1070 /* 1071 * Add an ID to the current state. Sets up either one or two keys to 1072 * refer to it, depending on whether it's first/last and the setting 1073 * of skipfirst. (skiplast does not need to be explicitly tested) 1074 */ 1075 static void 1076 q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id) 1077 { 1078 KASSERT(state->numids < state->maxids); 1079 KASSERT(state->numkeys < state->maxkeyvals); 1080 1081 if (!state->skipfirst || state->numkeys > 0) { 1082 state->keys[state->numkeys].qk_idtype = idtype; 1083 state->keys[state->numkeys].qk_id = id; 1084 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS; 1085 state->numkeys++; 1086 } 1087 if (state->numkeys < state->maxkeyvals) { 1088 state->keys[state->numkeys].qk_idtype = idtype; 1089 state->keys[state->numkeys].qk_id = id; 1090 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES; 1091 state->numkeys++; 1092 } else { 1093 KASSERT(state->skiplast); 1094 } 1095 state->numids++; 1096 } 1097 1098 /* 1099 * Callback function for getting IDs. Update counting and call addid. 
 */
static int
q2cursor_getids_callback(struct ufsmount *ump, uint64_t *offp,
    struct quota2_entry *q2ep, uint64_t off, void *v)
{
	struct q2cursor_getids *gi = v;
	id_t id;
	const int needswap = UFS_MPNEEDSWAP(ump);

	/* skip entries already returned by a previous cursorget call */
	if (gi->skipped < gi->skip) {
		gi->skipped++;
		return 0;
	}
	id = ufs_rw32(q2ep->q2e_uid, needswap);
	q2cursor_addid(gi->state, gi->idtype, id);
	gi->new_skip++;
	if (gi->state->numids >= gi->state->maxids) {
		/* got enough ids, stop now */
		gi->stopped = 1;
		return Q2WL_ABORT;
	}
	return 0;
}

/*
 * Fill in a batch of quotakeys by scanning one or more hash chains.
 */
static int
q2cursor_getkeys(struct ufsmount *ump, int idtype, struct ufsq2_cursor *cursor,
    struct q2cursor_state *state,
    int *hashsize_ret, struct quota2_entry *default_q2e_ret)
{
	const int needswap = UFS_MPNEEDSWAP(ump);
	struct buf *hbp;
	struct quota2_header *q2h;
	int quota2_hash_size;
	struct q2cursor_getids gi;
	uint64_t offset;
	int error;

	/*
	 * Read the header block.
	 */

	mutex_enter(&dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error) {
		mutex_exit(&dqlock);
		return error;
	}

	/* if the table size has changed, make the caller start over */
	quota2_hash_size = ufs_rw16(q2h->q2h_hash_size, needswap);
	if (cursor->q2c_hashsize == 0) {
		cursor->q2c_hashsize = quota2_hash_size;
	} else if (cursor->q2c_hashsize != quota2_hash_size) {
		error = SET_ERROR(EDEADLK);
		goto scanfail;
	}

	/* grab the entry with the default values out of the header */
	quota2_ufs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);

	/* If we haven't done the defaults yet, that goes first. */
	if (cursor->q2c_defaults_done == 0) {
		q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
		/* if we read both halves, mark it done */
		if (state->numids < state->maxids || !state->skiplast) {
			cursor->q2c_defaults_done = 1;
		}
	}

	gi.state = state;
	gi.idtype = idtype;

	while (state->numids < state->maxids) {
		if (cursor->q2c_hashpos >= quota2_hash_size) {
			/* nothing more left */
			break;
		}

		/* scan this hash chain */
		gi.skip = cursor->q2c_uidpos;
		gi.new_skip = gi.skip;
		gi.skipped = 0;
		gi.stopped = 0;
		offset = q2h->q2h_entries[cursor->q2c_hashpos];

		error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
		    q2cursor_getids_callback);
		/* Q2WL_ABORT is consumed by quota2_walk_list itself */
		KASSERT(error != Q2WL_ABORT);
		if (error) {
			break;
		}
		if (gi.stopped) {
			/* callback stopped before reading whole chain */
			cursor->q2c_uidpos = gi.new_skip;
			/* if we didn't get both halves, back up */
			if (state->numids == state->maxids && state->skiplast){
				KASSERT(cursor->q2c_uidpos > 0);
				cursor->q2c_uidpos--;
			}
		} else {
			/* read whole chain */
			/* if we got both halves of the last id, advance */
			if (state->numids < state->maxids || !state->skiplast){
				cursor->q2c_uidpos = 0;
				cursor->q2c_hashpos++;
			}
		}
	}

 scanfail:
	mutex_exit(&dqlock);
	brelse(hbp, 0);
	if (error)
		return error;

	*hashsize_ret = quota2_hash_size;
	return 0;
}

/*
 * Fetch the quotavals for the quotakeys.
1224 */ 1225 static int 1226 q2cursor_getvals(struct ufsmount *ump, struct q2cursor_state *state, 1227 const struct quota2_entry *default_q2e) 1228 { 1229 int hasid; 1230 id_t loadedid, id; 1231 unsigned pos; 1232 struct quota2_entry q2e; 1233 int objtype; 1234 int error; 1235 1236 hasid = 0; 1237 loadedid = 0; 1238 for (pos = 0; pos < state->numkeys; pos++) { 1239 id = state->keys[pos].qk_id; 1240 if (!hasid || id != loadedid) { 1241 hasid = 1; 1242 loadedid = id; 1243 if (id == QUOTA_DEFAULTID) { 1244 q2e = *default_q2e; 1245 } else { 1246 error = quota2_fetch_q2e(ump, 1247 &state->keys[pos], 1248 &q2e); 1249 if (error == ENOENT) { 1250 /* something changed - start over */ 1251 error = SET_ERROR(EDEADLK); 1252 } 1253 if (error) { 1254 return error; 1255 } 1256 } 1257 } 1258 1259 1260 objtype = state->keys[pos].qk_objtype; 1261 KASSERT(objtype >= 0 && objtype < N_QL); 1262 q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]); 1263 } 1264 1265 return 0; 1266 } 1267 1268 /* 1269 * Handle cursorget. 1270 * 1271 * We can't just read keys and values directly, because we can't walk 1272 * the list with qdlock and grab dq_interlock to read the entries at 1273 * the same time. So we're going to do two passes: one to figure out 1274 * which IDs we want and fill in the keys, and then a second to use 1275 * the keys to fetch the values. 1276 */ 1277 int 1278 quota2_handle_cmd_cursorget(struct ufsmount *ump, struct quotakcursor *qkc, 1279 struct quotakey *keys, struct quotaval *vals, unsigned maxreturn, 1280 unsigned *ret) 1281 { 1282 int error; 1283 struct ufsq2_cursor *cursor; 1284 struct ufsq2_cursor newcursor; 1285 struct q2cursor_state state; 1286 struct quota2_entry default_q2e; 1287 int idtype; 1288 int quota2_hash_size = 0; /* XXX: sh3 gcc 4.8 -Wuninitialized */ 1289 1290 /* 1291 * Convert and validate the cursor. 
1292 */ 1293 cursor = Q2CURSOR(qkc); 1294 error = q2cursor_check(cursor); 1295 if (error) { 1296 return error; 1297 } 1298 1299 /* 1300 * Make sure our on-disk codes match the values of the 1301 * FS-independent ones. This avoids the need for explicit 1302 * conversion (which would be a NOP anyway and thus easily 1303 * left out or called in the wrong places...) 1304 */ 1305 CTASSERT(QUOTA_IDTYPE_USER == USRQUOTA); 1306 CTASSERT(QUOTA_IDTYPE_GROUP == GRPQUOTA); 1307 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK); 1308 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE); 1309 1310 /* 1311 * If some of the idtypes aren't configured/enabled, arrange 1312 * to skip over them. 1313 */ 1314 if (cursor->q2c_users_done == 0 && 1315 ump->um_quotas[USRQUOTA] == NULLVP) { 1316 cursor->q2c_users_done = 1; 1317 } 1318 if (cursor->q2c_groups_done == 0 && 1319 ump->um_quotas[GRPQUOTA] == NULLVP) { 1320 cursor->q2c_groups_done = 1; 1321 } 1322 1323 /* Loop over, potentially, both idtypes */ 1324 while (1) { 1325 1326 /* Choose id type */ 1327 error = q2cursor_pickidtype(cursor, &idtype); 1328 if (error == EAGAIN) { 1329 /* nothing more to do, return 0 */ 1330 *ret = 0; 1331 return 0; 1332 } 1333 KASSERT(ump->um_quotas[idtype] != NULLVP); 1334 1335 /* 1336 * Initialize the per-call iteration state. Copy the 1337 * cursor state so we can update it in place but back 1338 * out on error. 1339 */ 1340 q2cursor_initstate(&state, keys, vals, maxreturn, 1341 cursor->q2c_blocks_done); 1342 newcursor = *cursor; 1343 1344 /* Assign keys */ 1345 error = q2cursor_getkeys(ump, idtype, &newcursor, &state, 1346 "a2_hash_size, &default_q2e); 1347 if (error) { 1348 return error; 1349 } 1350 1351 /* Now fill in the values. */ 1352 error = q2cursor_getvals(ump, &state, &default_q2e); 1353 if (error) { 1354 return error; 1355 } 1356 1357 /* 1358 * Now that we aren't going to fail and lose what we 1359 * did so far, we can update the cursor state. 
1360 */ 1361 1362 if (newcursor.q2c_hashpos >= quota2_hash_size) { 1363 if (idtype == QUOTA_IDTYPE_USER) 1364 cursor->q2c_users_done = 1; 1365 else 1366 cursor->q2c_groups_done = 1; 1367 1368 /* start over on another id type */ 1369 cursor->q2c_hashsize = 0; 1370 cursor->q2c_defaults_done = 0; 1371 cursor->q2c_hashpos = 0; 1372 cursor->q2c_uidpos = 0; 1373 cursor->q2c_blocks_done = 0; 1374 } else { 1375 *cursor = newcursor; 1376 cursor->q2c_blocks_done = state.skiplast; 1377 } 1378 1379 /* 1380 * If we have something to return, return it. 1381 * Otherwise, continue to the other idtype, if any, 1382 * and only return zero at end of iteration. 1383 */ 1384 if (state.numkeys > 0) { 1385 break; 1386 } 1387 } 1388 1389 *ret = state.numkeys; 1390 return 0; 1391 } 1392 1393 int 1394 quota2_handle_cmd_cursoropen(struct ufsmount *ump, struct quotakcursor *qkc) 1395 { 1396 struct ufsq2_cursor *cursor; 1397 1398 CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space)); 1399 cursor = Q2CURSOR(qkc); 1400 1401 cursor->q2c_magic = Q2C_MAGIC; 1402 cursor->q2c_hashsize = 0; 1403 1404 cursor->q2c_users_done = 0; 1405 cursor->q2c_groups_done = 0; 1406 cursor->q2c_defaults_done = 0; 1407 cursor->q2c_hashpos = 0; 1408 cursor->q2c_uidpos = 0; 1409 cursor->q2c_blocks_done = 0; 1410 return 0; 1411 } 1412 1413 int 1414 quota2_handle_cmd_cursorclose(struct ufsmount *ump, struct quotakcursor *qkc) 1415 { 1416 struct ufsq2_cursor *cursor; 1417 int error; 1418 1419 cursor = Q2CURSOR(qkc); 1420 error = q2cursor_check(cursor); 1421 if (error) { 1422 return error; 1423 } 1424 1425 /* nothing to do */ 1426 1427 return 0; 1428 } 1429 1430 int 1431 quota2_handle_cmd_cursorskipidtype(struct ufsmount *ump, 1432 struct quotakcursor *qkc, int idtype) 1433 { 1434 struct ufsq2_cursor *cursor; 1435 int error; 1436 1437 cursor = Q2CURSOR(qkc); 1438 error = q2cursor_check(cursor); 1439 if (error) { 1440 return error; 1441 } 1442 1443 switch (idtype) { 1444 case QUOTA_IDTYPE_USER: 1445 cursor->q2c_users_done = 
1; 1446 break; 1447 case QUOTA_IDTYPE_GROUP: 1448 cursor->q2c_groups_done = 1; 1449 break; 1450 default: 1451 return SET_ERROR(EINVAL); 1452 } 1453 1454 return 0; 1455 } 1456 1457 int 1458 quota2_handle_cmd_cursoratend(struct ufsmount *ump, struct quotakcursor *qkc, 1459 int *ret) 1460 { 1461 struct ufsq2_cursor *cursor; 1462 int error; 1463 1464 cursor = Q2CURSOR(qkc); 1465 error = q2cursor_check(cursor); 1466 if (error) { 1467 return error; 1468 } 1469 1470 *ret = (cursor->q2c_users_done && cursor->q2c_groups_done); 1471 return 0; 1472 } 1473 1474 int 1475 quota2_handle_cmd_cursorrewind(struct ufsmount *ump, struct quotakcursor *qkc) 1476 { 1477 struct ufsq2_cursor *cursor; 1478 int error; 1479 1480 cursor = Q2CURSOR(qkc); 1481 error = q2cursor_check(cursor); 1482 if (error) { 1483 return error; 1484 } 1485 1486 cursor->q2c_hashsize = 0; 1487 1488 cursor->q2c_users_done = 0; 1489 cursor->q2c_groups_done = 0; 1490 cursor->q2c_defaults_done = 0; 1491 cursor->q2c_hashpos = 0; 1492 cursor->q2c_uidpos = 0; 1493 cursor->q2c_blocks_done = 0; 1494 1495 return 0; 1496 } 1497 1498 int 1499 q2sync(struct mount *mp) 1500 { 1501 return 0; 1502 } 1503 1504 struct dq2get_callback { 1505 uid_t id; 1506 struct dquot *dq; 1507 }; 1508 1509 static int 1510 dq2get_callback(struct ufsmount *ump, uint64_t *offp, struct quota2_entry *q2e, 1511 uint64_t off, void *v) 1512 { 1513 struct dq2get_callback *c = v; 1514 daddr_t lblkno; 1515 int blkoff; 1516 const int needswap = UFS_MPNEEDSWAP(ump); 1517 1518 if (ufs_rw32(q2e->q2e_uid, needswap) == c->id) { 1519 KASSERT(mutex_owned(&c->dq->dq_interlock)); 1520 lblkno = (off >> ump->um_mountp->mnt_fs_bshift); 1521 blkoff = (off & ump->umq2_bmask); 1522 c->dq->dq2_lblkno = lblkno; 1523 c->dq->dq2_blkoff = blkoff; 1524 return Q2WL_ABORT; 1525 } 1526 return 0; 1527 } 1528 1529 int 1530 dq2get(struct vnode *dqvp, u_long id, struct ufsmount *ump, int type, 1531 struct dquot *dq) 1532 { 1533 struct buf *bp; 1534 struct quota2_header *q2h; 1535 int 
error; 1536 daddr_t offset; 1537 u_long hash_mask; 1538 struct dq2get_callback c = { 1539 .id = id, 1540 .dq = dq 1541 }; 1542 1543 KASSERT(mutex_owned(&dq->dq_interlock)); 1544 mutex_enter(&dqlock); 1545 error = getq2h(ump, type, &bp, &q2h, 0); 1546 if (error) 1547 goto out_mutex; 1548 /* look for our entry */ 1549 hash_mask = ((1 << q2h->q2h_hash_shift) - 1); 1550 offset = q2h->q2h_entries[id & hash_mask]; 1551 error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c, 1552 dq2get_callback); 1553 brelse(bp, 0); 1554 out_mutex: 1555 mutex_exit(&dqlock); 1556 return error; 1557 } 1558 1559 int 1560 dq2sync(struct vnode *vp, struct dquot *dq) 1561 { 1562 return 0; 1563 } 1564