/*	$NetBSD: vfs_lockf.c,v 1.39 2005/03/25 22:48:23 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.39 2005/03/25 22:48:23 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

POOL_INIT(lockfpool, sizeof(struct lockf), 0, 0, 0, "lockfpl",
    &pool_allocator_nointr);

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

static int lf_clearlock(struct lockf *, struct lockf **);
static int lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *);
static int lf_getlock(struct lockf *, struct flock *);
static int lf_setlock(struct lockf *, struct lockf **, struct simplelock *);
static void lf_split(struct lockf *, struct lockf *, struct lockf **);
static void lf_wakelock(struct lockf *);
static struct lockf *lf_alloc(uid_t, int);
static void lf_free(struct lockf *);


#ifdef LOCKF_DEBUG
static void lf_print(char *, struct lockf *);
static void lf_printlist(char *, struct lockf *);
#endif

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

/*
 * 3 options for allowfail.
 * 0 - always allocate.  1 - cutoff at limit.  2 - cutoff at double limit.
 */
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2)))
		return (NULL);
	uip->ui_lockcnt++;
	lock = pool_get(&lockfpool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}

void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	pool_put(&lockfpool, lock);
}
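
/*
 * Illustrative note (not part of the original source): the allowfail
 * argument of lf_alloc() selects how strictly the per-uid limit is
 * applied, matching the three options described above.  Roughly:
 *
 *	lf_alloc(uid, 0)   never fails; used for the spare lock an
 *			   unlock may need to split an existing lock
 *	lf_alloc(uid, 1)   returns NULL once ui_lockcnt > maxlocksperuid
 *	lf_alloc(uid, 2)   returns NULL once ui_lockcnt exceeds twice
 *			   the limit; used for F_UNLCK so that unlocks
 *			   still work slightly above the limit
 *
 * Root (uid 0) is never refused.
 */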

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct proc *p = curproc;
	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	struct simplelock *interlock = &ap->a_vp->v_interlock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return EINVAL;
	}
	if (start < 0)
		return EINVAL;
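
	/*
	 * Worked example (illustrative, not from the original source):
	 * for a file whose size is 100 bytes, l_whence == SEEK_END with
	 * l_start == -10 yields start = 100 + (-10) = 90, i.e. the lock
	 * covers the last ten bytes.  A start beyond the current end of
	 * file is also legal for advisory locks.
	 */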

	/*
	 * Allocate locks before acquiring the interlock; we need two
	 * locks in the worst case.
	 */
	switch (ap->a_op) {
	case F_SETLK:
	case F_UNLCK:
		/*
		 * XXX for F_UNLCK case, we can re-use lock.
		 */
		if ((fl->l_type & F_FLOCK) == 0) {
			/*
			 * byte-range lock might need one more lock.
			 */
			sparelock = lf_alloc(p->p_ucred->cr_uid, 0);
			if (sparelock == NULL) {
				error = ENOMEM;
				goto quit;
			}
			break;
		}
		/* FALLTHROUGH */

	case F_GETLK:
		sparelock = NULL;
		break;

	default:
		return EINVAL;
	}

	lock = lf_alloc(p->p_ucred->cr_uid, ap->a_op != F_UNLCK ? 1 : 2);
	if (lock == NULL) {
		error = ENOMEM;
		goto quit;
	}

	simple_lock(interlock);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto quit_unlock;
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part.  This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;
	lock->lf_lwp = curlwp;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		error = lf_setlock(lock, &sparelock, interlock);
		lock = NULL; /* lf_setlock freed it */
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &sparelock);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		break;

	default:
		break;
		/* NOTREACHED */
	}

quit_unlock:
	simple_unlock(interlock);
quit:
	if (lock)
		lf_free(lock);
	if (sparelock)
		lf_free(sparelock);

	return error;
}
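
/*
 * Illustrative sketch (assumed, not from this file): a filesystem
 * implements its VOP_ADVLOCK operation by forwarding to lf_advlock()
 * with the address of its per-inode lock list and the current file
 * size, along the lines of:
 *
 *	int
 *	xxx_advlock(void *v)
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return lf_advlock(ap, &ip->i_lockf, ip->i_size);
 *	}
 *
 * "xxx_advlock" is a hypothetical name; VTOI() and the i_lockf/i_size
 * fields follow the UFS conventions (cf. ufs_advlock).
 */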

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    struct simplelock *interlock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return EAGAIN;
		}
		/*
		 * We are blocked.  Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us.  MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
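		/*
		 * Illustrative example (not from the original source):
		 * if process A holds bytes 0-9 and waits for 10-19 while
		 * process B holds 10-19 and waits for 0-9, then starting
		 * from B's held lock, lf_lwp leads to B, B's wait channel
		 * leads to the lock blocking B, and that lock's lf_lwp is
		 * A itself; the cycle is detected and EDEADLK returned.
		 */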
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			struct lockf *waitblock;
			int i = 0;

			/*
			 * The block is waiting on something.  lf_lwp will
			 * be 0 once the lock is granted, so we terminate
			 * the loop if we find this.
			 */
			wlwp = block->lf_lwp;
			while (wlwp && (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = waitblock->lf_lwp;
				if (wlwp == lock->lf_lwp) {
					lf_free(lock);
					return EDEADLK;
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				lf_free(lock);
				return EDEADLK;
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = ltsleep(lock, priority, lockstr, 0, interlock);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			lf_free(lock);
			return error;
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	lock->lf_lwp = 0;
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return 0;
}
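
/*
 * Illustrative note (not from the original source): re-locking a range
 * this process already owns changes the lock type in place rather than
 * stacking a second lock.  E.g. a process holding an exclusive lock on
 * bytes 0-99 that requests a shared lock over exactly that range hits
 * case 1 above: the existing entry becomes F_RDLCK and lf_wakelock()
 * gives blocked readers a chance to acquire it.
 */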

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return 0;
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return overlap;
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return NOLOCKF;
}
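
/*
 * Compatibility summary (illustrative restatement of the test above):
 * two overlapping locks owned by different processes conflict iff at
 * least one of them is exclusive.
 *
 *	held \ wanted	F_RDLCK		F_WRLCK
 *	F_RDLCK		compatible	blocks
 *	F_WRLCK		blocks		blocks
 */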

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 * may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
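		/*
		 * Pictorially (illustrative), with the list entry lf
		 * drawn above the requested lock:
		 *
		 *	0:  lf:    ----			(disjoint)
		 *	    lock:        ====
		 *	1:  lf:    ----
		 *	    lock:  ====
		 *	2:  lf:    --------
		 *	    lock:    ====
		 *	3:  lf:      ----
		 *	    lock:  ========
		 *	4:  lf:    ------
		 *	    lock:      ======
		 *	5:  lf:        ------
		 *	    lock:  ======
		 */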
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return 0;
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return 1;
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return 2;
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return 3;
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return 4;
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return 5;
		}
		panic("lf_findoverlap: default");
	}
	return 0;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = *sparelock;
	*sparelock = NULL;
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
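
/*
 * Worked example (illustrative, not from the original source): with
 * lock1 covering bytes 0-99 and lock2 an unlock of bytes 40-59, neither
 * end matches, so the three-piece case applies:
 *
 *	before:  lock1 = [0, 99]
 *	after:   lock1 = [0, 39], splitlock = [60, 99]
 *
 * splitlock comes from *sparelock, which lf_advlock() allocated up
 * front so that this step cannot fail while the interlock is held.
 */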

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file 0x%p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file 0x%p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file 0x%p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */