/*	$NetBSD: vfs_lockf.c,v 1.83 2024/12/07 02:27:38 riastradh Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.83 2024/12/07 02:27:38 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockf.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/systm.h>
#include <sys/uidinfo.h>
#include <sys/vnode.h>

/*
 * The lockf structure is a kernel structure which contains the information
 * associated with a byte range lock.  The lockf structures are linked into
 * the vnode structure.  Locks are sorted by the starting byte of the lock for
 * efficiency.
 *
 * lf_next is used for two purposes, depending on whether the lock is
 * being held, or is in conflict with an existing lock.  If this lock
 * is held, it indicates the next lock on the same vnode.
 * For pending locks, if lock->lf_next is non-NULL, then lock->lf_block
 * must be queued on the lf_blkhd TAILQ of lock->lf_next.
 */

TAILQ_HEAD(locklist, lockf);

struct lockf {
	kcondvar_t lf_cv;	 /* Signalling */
	short	lf_flags;	 /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
	short	lf_type;	 /* Lock type: F_RDLCK, F_WRLCK */
	off_t	lf_start;	 /* The byte # of the start of the lock */
	off_t	lf_end;		 /* The byte # of the end of the lock (-1=EOF)*/
	void	*lf_id;		 /* process or file description holding lock */
	struct	lockf **lf_head; /* Back pointer to the head of lockf list */
	struct	lockf *lf_next;	 /* Next lock on this vnode, or blocking lock */
	struct	locklist lf_blkhd; /* List of requests blocked on this lock */
	TAILQ_ENTRY(lockf) lf_block; /* A request waiting for a lock */
	struct	uidinfo *lf_uip; /* Cached pointer to uidinfo */
};

/* Maximum length of sleep chains to traverse to try and detect deadlock. */
#define MAXDEPTH 50

static kmutex_t lockf_lock __cacheline_aligned;
static char lockstr[] = "lockf";

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define SELF	0x1
#define OTHERS	0x2

/*
 * XXX TODO
 * Misc cleanups: "void *id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
#define MAXLOCKSPERUID (2 * maxfiles)

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(const char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file %p", (struct file *)lock->lf_id);
	printf(" %s, start %jd, end %jd",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(const char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file %p", (struct file *)lf->lf_id);
		printf(", %s, start %jd, end %jd",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("; proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("; file %p", (struct file *)blk->lf_id);
			printf(", %s, start %jd, end %jd",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown",
			    (intmax_t)blk->lf_start, (intmax_t)blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */

/*
 * 3 options for allowfail.
 * 0 - always allocate.  1 - cutoff at limit.  2 - cutoff at double limit.
 */
static struct lockf *
lf_alloc(int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;
	u_long lcnt;
	const uid_t uid = kauth_cred_geteuid(kauth_cred_get());

	uip = uid_find(uid);
	lcnt = atomic_inc_ulong_nv(&uip->ui_lockcnt);
	if (uid && allowfail && lcnt >
	    (allowfail == 1 ?
	     MAXLOCKSPERUID : (MAXLOCKSPERUID * 2))) {
		atomic_dec_ulong(&uip->ui_lockcnt);
		return NULL;
	}

	lock = kmem_alloc(sizeof(*lock), KM_SLEEP);
	lock->lf_uip = uip;
	cv_init(&lock->lf_cv, lockstr);
	return lock;
}

/*
 * Free a lock, dropping the owner's per-uid lock count and the
 * condition variable embedded in it.
 */
static void
lf_free(struct lockf *lock)
{

	atomic_dec_ulong(&lock->lf_uip->ui_lockcnt);
	cv_destroy(&lock->lf_cv);
	kmem_free(lock, sizeof(*lock));
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 * may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NULL)
		return 0;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return 0;
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return 1;
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return
			    2;
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return 3;
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return 4;
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return 5;
		}
		panic("lf_findoverlap: default");
	}
	return 0;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = *sparelock;
	*sparelock = NULL;
	cv_destroy(&splitlock->lf_cv);
	memcpy(splitlock, lock1, sizeof(*splitlock));
	cv_init(&splitlock->lf_cv, lockstr);

	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		cv_broadcast(&wakelock->lf_cv);
	}
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NULL)
		return 0;
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return overlap;
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return NULL;
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    kmutex_t *interlock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	int ovcase, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return SET_ERROR(EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			volatile const struct lockf *waitblock;
			int i = 0;
			struct proc *p;

			p = (struct proc *)block->lf_id;
			KASSERT(p != NULL);
			while (i++ < maxlockdepth) {
				mutex_enter(p->p_lock);
				if (p->p_nlwps > 1) {
					mutex_exit(p->p_lock);
					break;
				}
				wlwp = LIST_FIRST(&p->p_lwps);
				lwp_lock(wlwp);
				if (wlwp->l_wchan == NULL ||
				    wlwp->l_wmesg != lockstr) {
					lwp_unlock(wlwp);
					mutex_exit(p->p_lock);
					break;
				}
				waitblock = wlwp->l_wchan;
				lwp_unlock(wlwp);
				mutex_exit(p->p_lock);
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				p = (struct proc *)waitblock->lf_id;
				if (p == curproc) {
					lf_free(lock);
					return SET_ERROR(EDEADLK);
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				lf_free(lock);
				return SET_ERROR(EDEADLK);
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = cv_wait_sig(&lock->lf_cv, interlock);

		/*
		 * We may have been awoken by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NULL).
		 */
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			lf_free(lock);
			return error;
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))
				    != NULL) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the
			 * overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return 0;
}

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	kmutex_t *interlock = &lockf_lock;
	off_t start, end;
	int error = 0;

	KASSERTMSG(size >= 0, "size=%jd", (intmax_t)size);

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (fl->l_start > __type_max(off_t) - size)
			return SET_ERROR(EINVAL);
		start = size + fl->l_start;
		break;

	default:
		return SET_ERROR(EINVAL);
	}

	if (fl->l_len == 0)
		end = -1;
	else {
		if (fl->l_len >= 0) {
			if (start >= 0 &&
			    fl->l_len - 1 > __type_max(off_t) - start)
				return SET_ERROR(EINVAL);
			end = start + (fl->l_len - 1);
		} else {
			/* lockf() allows -ve lengths */
			if (start < 0)
				return SET_ERROR(EINVAL);
			end = start - 1;
			start += fl->l_len;
		}
	}
	if (start < 0)
		return SET_ERROR(EINVAL);

	/*
	 * Allocate locks before acquiring the interlock.  We need two
	 * locks in the worst case.
	 */
	switch (ap->a_op) {
	case F_SETLK:
	case F_UNLCK:
		/*
		 * XXX For F_UNLCK case, we can re-use the lock.
		 */
		if ((ap->a_flags & F_FLOCK) == 0) {
			/*
			 * Byte-range lock might need one more lock.
			 */
			sparelock = lf_alloc(0);
			if (sparelock == NULL) {
				error = SET_ERROR(ENOMEM);
				goto quit;
			}
			break;
		}
		/* FALLTHROUGH */

	case F_GETLK:
		sparelock = NULL;
		break;

	default:
		return SET_ERROR(EINVAL);
	}

	switch (ap->a_op) {
	case F_SETLK:
		lock = lf_alloc(1);
		break;
	case F_UNLCK:
		if (start == 0 || end == -1) {
			/* never split */
			lock = lf_alloc(0);
		} else {
			/* might split */
			lock = lf_alloc(2);
		}
		break;
	case F_GETLK:
		lock = lf_alloc(0);
		break;
	}
	if (lock == NULL) {
		error = SET_ERROR(ENOMEM);
		goto quit;
	}

	mutex_enter(interlock);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto quit_unlock;
		}
	}

	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = ap->a_id;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		error = lf_setlock(lock, &sparelock, interlock);
		lock = NULL; /* lf_setlock freed it */
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &sparelock);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		break;

	default:
		break;
		/* NOTREACHED */
	}

quit_unlock:
	mutex_exit(interlock);
quit:
	if (lock)
		lf_free(lock);
	if (sparelock)
		lf_free(sparelock);

	return error;
}

/*
 * Initialize subsystem.
 *
 * XXX We use a global lock.  This could be the vnode interlock, but
 * the deadlock detection code may need to inspect locks belonging to
 * other files.
 */
void
lf_init(void)
{

	mutex_init(&lockf_lock, MUTEX_DEFAULT, IPL_NONE);
}
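
/*
 * Example: a filesystem's VOP_ADVLOCK implementation normally just
 * wraps lf_advlock(), handing it the per-vnode lock list head and the
 * current file size.  A minimal sketch, roughly what genfs_advlock()
 * does; the vnode field names (v_lockf, v_size) are illustrative and
 * may differ between releases:
 *
 *	int
 *	genfs_advlock(void *v)
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct vnode *vp = ap->a_vp;
 *
 *		return lf_advlock(ap, &vp->v_lockf, vp->v_size);
 *	}
 */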