/*	$NetBSD: vfs_lockf.c,v 1.43 2005/05/22 15:54:47 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.43 2005/05/22 15:54:47 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

POOL_INIT(lockfpool, sizeof(struct lockf), 0, 0, 0, "lockfpl",
    &pool_allocator_nointr);

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

static int lf_clearlock(struct lockf *, struct lockf **);
static int lf_findoverlap(struct lockf *,
    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *);
static int lf_getlock(struct lockf *, struct flock *);
static int lf_setlock(struct lockf *, struct lockf **, struct simplelock *);
static void lf_split(struct lockf *, struct lockf *, struct lockf **);
static void lf_wakelock(struct lockf *);
static struct lockf *lf_alloc(uid_t, int);
static void lf_free(struct lockf *);


#ifdef LOCKF_DEBUG
static void lf_print(char *, struct lockf *);
static void lf_printlist(char *, struct lockf *);
#endif

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed. Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory. For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed. If the unlocking causes too many splits,
 * however, you're completely cut off.
 */
int maxlocksperuid = 1024;

/*
 * 3 options for allowfail.
 * 0 - always allocate.  1 - cut off at limit.  2 - cut off at double limit.
 */
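
/*
 * Illustrative note (not part of the original comments): with the default
 * maxlocksperuid of 1024, lf_advlock() below chooses allowfail as
 *
 *	lock = lf_alloc(uid, op != F_UNLCK ? 1 : 2);
 *
 * so an ordinary lock request is refused once the user already holds more
 * than 1024 lockf structures, while an unlock is still allowed to allocate
 * (it may need to split an existing lock) until the count exceeds 2048.
 * Allocations for root (uid 0), and any call with allowfail == 0 such as
 * the spare lock below, are never refused.
 */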
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
        struct uidinfo *uip;
        struct lockf *lock;
        int s;

        uip = uid_find(uid);
        UILOCK(uip, s);
        if (uid && allowfail && uip->ui_lockcnt >
            (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
                UIUNLOCK(uip, s);
                return NULL;
        }
        uip->ui_lockcnt++;
        UIUNLOCK(uip, s);
        lock = pool_get(&lockfpool, PR_WAITOK);
        lock->lf_uid = uid;
        return lock;
}

void
lf_free(struct lockf *lock)
{
        struct uidinfo *uip;
        int s;

        uip = uid_find(lock->lf_uid);
        UILOCK(uip, s);
        uip->ui_lockcnt--;
        UIUNLOCK(uip, s);
        pool_put(&lockfpool, lock);
}

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
        struct proc *p = curproc;
        struct flock *fl = ap->a_fl;
        struct lockf *lock = NULL;
        struct lockf *sparelock;
        struct simplelock *interlock = &ap->a_vp->v_interlock;
        off_t start, end;
        int error = 0;

        /*
         * Convert the flock structure into a start and end.
         */
        switch (fl->l_whence) {
        case SEEK_SET:
        case SEEK_CUR:
                /*
                 * Caller is responsible for adding any necessary offset
                 * when SEEK_CUR is used.
                 */
                start = fl->l_start;
                break;

        case SEEK_END:
                start = size + fl->l_start;
                break;

        default:
                return EINVAL;
        }
        if (start < 0)
                return EINVAL;

        /*
         * Allocate locks before acquiring the simple lock.
         * We need two locks in the worst case.
         */
        switch (ap->a_op) {
        case F_SETLK:
        case F_UNLCK:
                /*
                 * XXX for F_UNLCK case, we can re-use lock.
                 */
                if ((fl->l_type & F_FLOCK) == 0) {
                        /*
                         * byte-range lock might need one more lock.
                         */
                        sparelock = lf_alloc(p->p_ucred->cr_uid, 0);
                        if (sparelock == NULL) {
                                error = ENOMEM;
                                goto quit;
                        }
                        break;
                }
                /* FALLTHROUGH */

        case F_GETLK:
                sparelock = NULL;
                break;

        default:
                return EINVAL;
        }

        lock = lf_alloc(p->p_ucred->cr_uid, ap->a_op != F_UNLCK ? 1 : 2);
        if (lock == NULL) {
                error = ENOMEM;
                goto quit;
        }

        simple_lock(interlock);

        /*
         * Avoid the common case of unlocking when the inode has no locks.
         */
        if (*head == (struct lockf *)0) {
                if (ap->a_op != F_SETLK) {
                        fl->l_type = F_UNLCK;
                        error = 0;
                        goto quit_unlock;
                }
        }

        if (fl->l_len == 0)
                end = -1;
        else
                end = start + fl->l_len - 1;
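        /*
         * Worked example (illustrative, not from the original comments):
         * a request with l_whence == SEEK_SET, l_start == 100 and
         * l_len == 10 arrives here with start == 100 and end == 109,
         * i.e. it covers bytes [100, 109].  The same request with
         * l_len == 0 yields end == -1, which stands for "to end of file"
         * throughout this code.
         */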
        /*
         * Create the lockf structure.
         */
        lock->lf_start = start;
        lock->lf_end = end;
        /* XXX NJWLWP
         * I don't want to make the entire VFS universe use LWPs, because
         * they don't need them, for the most part. This is an exception,
         * and a kluge.
         */

        lock->lf_head = head;
        lock->lf_type = fl->l_type;
        lock->lf_next = (struct lockf *)0;
        TAILQ_INIT(&lock->lf_blkhd);
        lock->lf_flags = ap->a_flags;
        if (lock->lf_flags & F_POSIX) {
                KASSERT(curproc == (struct proc *)ap->a_id);
        }
        lock->lf_id = (struct proc *)ap->a_id;
        lock->lf_lwp = curlwp;

        /*
         * Do the requested operation.
         */
        switch (ap->a_op) {

        case F_SETLK:
                error = lf_setlock(lock, &sparelock, interlock);
                lock = NULL; /* lf_setlock consumed it: linked in or freed */
                break;

        case F_UNLCK:
                error = lf_clearlock(lock, &sparelock);
                break;

        case F_GETLK:
                error = lf_getlock(lock, fl);
                break;

        default:
                break;
                /* NOTREACHED */
        }

quit_unlock:
        simple_unlock(interlock);
quit:
        if (lock)
                lf_free(lock);
        if (sparelock)
                lf_free(sparelock);

        return error;
}
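
/*
 * Usage sketch (illustrative only, not part of this file): a filesystem's
 * VOP_ADVLOCK implementation normally just passes its per-inode lock list
 * head and the current file size to lf_advlock().  UFS, for example, does
 * roughly the following (VTOI, i_lockf and i_size are UFS names, shown
 * here only as an assumption about the caller):
 *
 *	int
 *	ufs_advlock(void *v)
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return lf_advlock(ap, &ip->i_lockf, ip->i_size);
 *	}
 */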

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    struct simplelock *interlock)
{
        struct lockf *block;
        struct lockf **head = lock->lf_head;
        struct lockf **prev, *overlap, *ltmp;
        static char lockstr[] = "lockf";
        int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
        if (lockf_debug & 1)
                lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

        /*
         * Set the priority
         */
        priority = PLOCK;
        if (lock->lf_type == F_WRLCK)
                priority += 4;
        priority |= PCATCH;
        /*
         * Scan lock list for this file looking for locks that would block us.
         */
        while ((block = lf_getblock(lock)) != NULL) {
                /*
                 * Free the structure and return if nonblocking.
                 */
                if ((lock->lf_flags & F_WAIT) == 0) {
                        lf_free(lock);
                        return EAGAIN;
                }
                /*
                 * We are blocked. Since flock style locks cover
                 * the whole file, there is no chance for deadlock.
                 * For byte-range locks we must check for deadlock.
                 *
                 * Deadlock detection is done by looking through the
                 * wait channels to see if there are any cycles that
                 * involve us. MAXDEPTH is set just to make sure we
                 * do not go off into neverneverland.
                 */
                if ((lock->lf_flags & F_POSIX) &&
                    (block->lf_flags & F_POSIX)) {
                        struct lwp *wlwp;
                        struct lockf *waitblock;
                        int i = 0;

                        /*
                         * The block is waiting on something. lf_lwp will be
                         * 0 once the lock is granted, so we terminate the
                         * loop if we find this.
                         */
                        wlwp = block->lf_lwp;
                        while (wlwp && (i++ < maxlockdepth)) {
                                waitblock = (struct lockf *)wlwp->l_wchan;
                                /* Get the owner of the blocking lock */
                                waitblock = waitblock->lf_next;
                                if ((waitblock->lf_flags & F_POSIX) == 0)
                                        break;
                                wlwp = waitblock->lf_lwp;
                                if (wlwp == lock->lf_lwp) {
                                        lf_free(lock);
                                        return EDEADLK;
                                }
                        }
                        /*
                         * If we're still following a dependency chain
                         * after maxlockdepth iterations, assume we're in
                         * a cycle to be safe.
                         */
                        if (i >= maxlockdepth) {
                                lf_free(lock);
                                return EDEADLK;
                        }
                }
                /*
                 * For flock type locks, we must first remove
                 * any shared locks that we hold before we sleep
                 * waiting for an exclusive lock.
                 */
                if ((lock->lf_flags & F_FLOCK) &&
                    lock->lf_type == F_WRLCK) {
                        lock->lf_type = F_UNLCK;
                        (void) lf_clearlock(lock, NULL);
                        lock->lf_type = F_WRLCK;
                }
                /*
                 * Add our lock to the blocked list and sleep until we're free.
                 * Remember who blocked us (for deadlock detection).
                 */
                lock->lf_next = block;
                TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
                if (lockf_debug & 1) {
                        lf_print("lf_setlock: blocking on", block);
                        lf_printlist("lf_setlock", block);
                }
#endif /* LOCKF_DEBUG */
                error = ltsleep(lock, priority, lockstr, 0, interlock);

                /*
                 * We may have been awakened by a signal (in
                 * which case we must remove ourselves from the
                 * blocked list) and/or by another process
                 * releasing a lock (in which case we have already
                 * been removed from the blocked list and our
                 * lf_next field set to NOLOCKF).
                 */
                if (lock->lf_next != NOLOCKF) {
                        TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
                        lock->lf_next = NOLOCKF;
                }
                if (error) {
                        lf_free(lock);
                        return error;
                }
        }
        /*
         * No blocks!! Add the lock. Note that we will
         * downgrade or upgrade any overlapping locks this
         * process already owns.
         *
         * Skip over locks owned by other processes.
         * Handle any locks that overlap and are owned by ourselves.
         */
        lock->lf_lwp = 0;
        prev = head;
        block = *head;
        needtolink = 1;
        for (;;) {
                ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
                if (ovcase)
                        block = overlap->lf_next;
                /*
                 * Six cases:
                 *	0) no overlap
                 *	1) overlap == lock
                 *	2) overlap contains lock
                 *	3) lock contains overlap
                 *	4) overlap starts before lock
                 *	5) overlap ends after lock
                 */
                switch (ovcase) {
                case 0: /* no overlap */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        break;

                case 1: /* overlap == lock */
                        /*
                         * If downgrading lock, others may be
                         * able to acquire it.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK)
                                lf_wakelock(overlap);
                        overlap->lf_type = lock->lf_type;
                        lf_free(lock);
                        lock = overlap; /* for debug output below */
                        break;

                case 2: /* overlap contains lock */
                        /*
                         * Check for common starting point and different types.
                         */
                        if (overlap->lf_type == lock->lf_type) {
                                lf_free(lock);
                                lock = overlap; /* for debug output below */
                                break;
                        }
                        if (overlap->lf_start == lock->lf_start) {
                                *prev = lock;
                                lock->lf_next = overlap;
                                overlap->lf_start = lock->lf_end + 1;
                        } else
                                lf_split(overlap, lock, sparelock);
                        lf_wakelock(overlap);
                        break;

                case 3: /* lock contains overlap */
                        /*
                         * If downgrading lock, others may be able to
                         * acquire it, otherwise take the list.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK) {
                                lf_wakelock(overlap);
                        } else {
                                while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
                                        KASSERT(ltmp->lf_next == overlap);
                                        TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
                                            lf_block);
                                        ltmp->lf_next = lock;
                                        TAILQ_INSERT_TAIL(&lock->lf_blkhd,
                                            ltmp, lf_block);
                                }
                        }
                        /*
                         * Add the new lock if necessary and delete the overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap->lf_next;
                                prev = &lock->lf_next;
                                needtolink = 0;
                        } else
                                *prev = overlap->lf_next;
                        lf_free(overlap);
                        continue;

                case 4: /* overlap starts before lock */
                        /*
                         * Add lock after overlap on the list.
                         */
                        lock->lf_next = overlap->lf_next;
                        overlap->lf_next = lock;
                        overlap->lf_end = lock->lf_start - 1;
                        prev = &lock->lf_next;
                        lf_wakelock(overlap);
                        needtolink = 0;
                        continue;

                case 5: /* overlap ends after lock */
                        /*
                         * Add the new lock before overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        overlap->lf_start = lock->lf_end + 1;
                        lf_wakelock(overlap);
                        break;
                }
                break;
        }
#ifdef LOCKF_DEBUG
        if (lockf_debug & 1) {
                lf_print("lf_setlock: got the lock", lock);
                lf_printlist("lf_setlock", lock);
        }
#endif /* LOCKF_DEBUG */
        return 0;
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
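/*
 * Worked example (illustrative, not part of the original comments): if the
 * process holds a lock covering [0, 99] and unlocks [0, 49],
 * lf_findoverlap() reports case 2 with a common starting point, so the
 * existing lock is simply shrunk to [50, 99].  Unlocking [40, 59] instead
 * splits the original lock into [0, 39] and [60, 99] via lf_split(), which
 * consumes the spare lock allocated by lf_advlock().
 */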
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
        struct lockf **head = unlock->lf_head;
        struct lockf *lf = *head;
        struct lockf *overlap, **prev;
        int ovcase;

        if (lf == NOLOCKF)
                return 0;
#ifdef LOCKF_DEBUG
        if (unlock->lf_type != F_UNLCK)
                panic("lf_clearlock: bad type");
        if (lockf_debug & 1)
                lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
        prev = head;
        while ((ovcase = lf_findoverlap(lf, unlock, SELF,
            &prev, &overlap)) != 0) {
                /*
                 * Wakeup the list of locks to be retried.
                 */
                lf_wakelock(overlap);

                switch (ovcase) {

                case 1: /* overlap == lock */
                        *prev = overlap->lf_next;
                        lf_free(overlap);
                        break;

                case 2: /* overlap contains lock: split it */
                        if (overlap->lf_start == unlock->lf_start) {
                                overlap->lf_start = unlock->lf_end + 1;
                                break;
                        }
                        lf_split(overlap, unlock, sparelock);
                        overlap->lf_next = unlock->lf_next;
                        break;

                case 3: /* lock contains overlap */
                        *prev = overlap->lf_next;
                        lf = overlap->lf_next;
                        lf_free(overlap);
                        continue;

                case 4: /* overlap starts before lock */
                        overlap->lf_end = unlock->lf_start - 1;
                        prev = &overlap->lf_next;
                        lf = overlap->lf_next;
                        continue;

                case 5: /* overlap ends after lock */
                        overlap->lf_start = unlock->lf_end + 1;
                        break;
                }
                break;
        }
#ifdef LOCKF_DEBUG
        if (lockf_debug & 1)
                lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
        return 0;
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
        struct lockf *block;

#ifdef LOCKF_DEBUG
        if (lockf_debug & 1)
                lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

        if ((block = lf_getblock(lock)) != NULL) {
                fl->l_type = block->lf_type;
                fl->l_whence = SEEK_SET;
                fl->l_start = block->lf_start;
                if (block->lf_end == -1)
                        fl->l_len = 0;
                else
                        fl->l_len = block->lf_end - block->lf_start + 1;
                if (block->lf_flags & F_POSIX)
                        fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
                else
                        fl->l_pid = -1;
        } else {
                fl->l_type = F_UNLCK;
        }
        return 0;
}
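
/*
 * Usage sketch (illustrative, userland view): lf_getlock() is what services
 * fcntl(F_GETLK) requests arriving through VOP_ADVLOCK.  A process can ask
 * who would block a write lock on the first 100 bytes of a file roughly
 * like this:
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 100;
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("blocked by pid %d\n", (int)fl.l_pid);
 *
 * On return, l_type is either F_UNLCK (nothing blocks) or describes the
 * first blocking lock, with l_pid set to -1 for flock(2)-style locks.
 */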

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
        struct lockf **prev, *overlap, *lf = *(lock->lf_head);

        prev = lock->lf_head;
        while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
                /*
                 * We've found an overlap, see if it blocks us
                 */
                if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
                        return overlap;
                /*
                 * Nope, point to the next one on the list and
                 * see if it blocks us
                 */
                lf = overlap->lf_next;
        }
        return NOLOCKF;
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
        off_t start, end;

        *overlap = lf;
        if (lf == NOLOCKF)
                return 0;
#ifdef LOCKF_DEBUG
        if (lockf_debug & 2)
                lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
        start = lock->lf_start;
        end = lock->lf_end;
        while (lf != NOLOCKF) {
                if (((type == SELF) && lf->lf_id != lock->lf_id) ||
                    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
#ifdef LOCKF_DEBUG
                if (lockf_debug & 2)
                        lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
                /*
                 * OK, check for overlap
                 *
                 * Six cases:
                 *	0) no overlap
                 *	1) overlap == lock
                 *	2) overlap contains lock
                 *	3) lock contains overlap
                 *	4) overlap starts before lock
                 *	5) overlap ends after lock
                 */
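                /*
                 * Illustrative mapping (not in the original comment): with
                 * an existing lock lf covering [10, 20], a request "lock"
                 * covering
                 *
                 *	[30, 40] is case 0 (no overlap)
                 *	[10, 20] is case 1 (overlap == lock)
                 *	[12, 15] is case 2 (overlap contains lock)
                 *	[ 5, 25] is case 3 (lock contains overlap)
                 *	[15, 25] is case 4 (overlap starts before lock)
                 *	[ 5, 15] is case 5 (overlap ends after lock)
                 */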
                if ((lf->lf_end != -1 && start > lf->lf_end) ||
                    (end != -1 && lf->lf_start > end)) {
                        /* Case 0 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("no overlap\n");
#endif /* LOCKF_DEBUG */
                        if ((type & SELF) && end != -1 && lf->lf_start > end)
                                return 0;
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
                if ((lf->lf_start == start) && (lf->lf_end == end)) {
                        /* Case 1 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
                        return 1;
                }
                if ((lf->lf_start <= start) &&
                    (end != -1) &&
                    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
                        /* Case 2 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
                        return 2;
                }
                if (start <= lf->lf_start &&
                    (end == -1 ||
                     (lf->lf_end != -1 && end >= lf->lf_end))) {
                        /* Case 3 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
                        return 3;
                }
                if ((lf->lf_start < start) &&
                    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
                        /* Case 4 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
                        return 4;
                }
                if ((lf->lf_start > start) &&
                    (end != -1) &&
                    ((lf->lf_end > end) || (lf->lf_end == -1))) {
                        /* Case 5 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
                        return 5;
                }
                panic("lf_findoverlap: default");
        }
        return 0;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
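/*
 * Worked example (illustrative, not part of the original comments): if
 * lock1 covers [0, 99] and lock2 covers [40, 59], neither end coincides,
 * so the spare lock becomes a copy of lock1 covering [60, 99], lock1 is
 * trimmed to [0, 39], and the resulting list order is
 * lock1 -> lock2 -> splitlock.  When one end does line up, lock1 is only
 * trimmed and the spare lock is not consumed.
 */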
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
        struct lockf *splitlock;

#ifdef LOCKF_DEBUG
        if (lockf_debug & 2) {
                lf_print("lf_split", lock1);
                lf_print("splitting from", lock2);
        }
#endif /* LOCKF_DEBUG */
        /*
         * Check to see if splitting into only two pieces.
         */
        if (lock1->lf_start == lock2->lf_start) {
                lock1->lf_start = lock2->lf_end + 1;
                lock2->lf_next = lock1;
                return;
        }
        if (lock1->lf_end == lock2->lf_end) {
                lock1->lf_end = lock2->lf_start - 1;
                lock2->lf_next = lock1->lf_next;
                lock1->lf_next = lock2;
                return;
        }
        /*
         * Make a new lock consisting of the last part of
         * the encompassing lock
         */
        splitlock = *sparelock;
        *sparelock = NULL;
        memcpy(splitlock, lock1, sizeof(*splitlock));
        splitlock->lf_start = lock2->lf_end + 1;
        TAILQ_INIT(&splitlock->lf_blkhd);
        lock1->lf_end = lock2->lf_start - 1;
        /*
         * OK, now link it in
         */
        splitlock->lf_next = lock1->lf_next;
        lock2->lf_next = splitlock;
        lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
        struct lockf *wakelock;

        while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
                KASSERT(wakelock->lf_next == listhead);
                TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
                wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
                if (lockf_debug & 2)
                        lf_print("lf_wakelock: awakening", wakelock);
#endif
                wakeup(wakelock);
        }
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf *lock)
{

        printf("%s: lock %p for ", tag, lock);
        if (lock->lf_flags & F_POSIX)
                printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
        else
                printf("file %p", (struct file *)lock->lf_id);
        printf(" %s, start %qx, end %qx",
            lock->lf_type == F_RDLCK ? "shared" :
            lock->lf_type == F_WRLCK ? "exclusive" :
            lock->lf_type == F_UNLCK ? "unlock" :
            "unknown", lock->lf_start, lock->lf_end);
        if (TAILQ_FIRST(&lock->lf_blkhd))
                printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
        else
                printf("\n");
}

static void
lf_printlist(char *tag, struct lockf *lock)
{
        struct lockf *lf, *blk;

        printf("%s: Lock list:\n", tag);
        for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
                printf("\tlock %p for ", lf);
                if (lf->lf_flags & F_POSIX)
                        printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
                else
                        printf("file %p", (struct file *)lf->lf_id);
                printf(", %s, start %qx, end %qx",
                    lf->lf_type == F_RDLCK ? "shared" :
                    lf->lf_type == F_WRLCK ? "exclusive" :
                    lf->lf_type == F_UNLCK ? "unlock" :
                    "unknown", lf->lf_start, lf->lf_end);
                TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
                        if (blk->lf_flags & F_POSIX)
                                printf("proc %d",
                                    ((struct proc *)blk->lf_id)->p_pid);
                        else
                                printf("file %p", (struct file *)blk->lf_id);
                        printf(", %s, start %qx, end %qx",
                            blk->lf_type == F_RDLCK ? "shared" :
                            blk->lf_type == F_WRLCK ? "exclusive" :
                            blk->lf_type == F_UNLCK ? "unlock" :
                            "unknown", blk->lf_start, blk->lf_end);
                        if (TAILQ_FIRST(&blk->lf_blkhd))
                                panic("lf_printlist: bad list");
                }
                printf("\n");
        }
}
#endif /* LOCKF_DEBUG */