/*	$NetBSD: vfs_lockf.c,v 1.46 2005/10/28 15:37:23 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.46 2005/10/28 15:37:23 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

POOL_INIT(lockfpool, sizeof(struct lockf), 0, 0, 0, "lockfpl",
    &pool_allocator_nointr);

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file %p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file %p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file %p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */

/*
 * 3 options for allowfail.
 * 0 - always allocate.  1 - cutoff at limit.  2 - cutoff at double limit.
 */
static struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;
	int s;

	uip = uid_find(uid);
	UILOCK(uip, s);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		UIUNLOCK(uip, s);
		return NULL;
	}
	uip->ui_lockcnt++;
	UIUNLOCK(uip, s);
	lock = pool_get(&lockfpool, PR_WAITOK);
	lock->lf_uid = uid;
	return lock;
}
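
/*
 * Illustrative summary of the allowfail policy (a restatement of the
 * callers in lf_advlock() below, not new behavior): the spare entry for
 * a potential split is allocated with allowfail = 0 so a split can
 * always proceed, F_SETLK uses 1, and F_UNLCK uses 2 so an unlock is
 * still granted slightly above the limit:
 *
 *	lf_alloc(uid, 0)	never fails (and root is never limited)
 *	lf_alloc(uid, 1)	NULL once ui_lockcnt > maxlocksperuid
 *	lf_alloc(uid, 2)	NULL once ui_lockcnt > 2 * maxlocksperuid
 */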

static void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;
	int s;

	uip = uid_find(lock->lf_uid);
	UILOCK(uip, s);
	uip->ui_lockcnt--;
	UIUNLOCK(uip, s);
	pool_put(&lockfpool, lock);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
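		/*
		 * Illustrative sketch of the six cases (a picture of the
		 * definitions above, added for clarity), with "lock" the
		 * range being searched for and "lf" the list entry under
		 * test:
		 *
		 *	lock:           |=====|
		 *	case 0:  |--|              or           |--|
		 *	case 1:         |-----|
		 *	case 2:       |---------|
		 *	case 3:          |---|
		 *	case 4:     |-----|
		 *	case 5:           |-------|
		 */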
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return 0;
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return 1;
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return 2;
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return 3;
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return 4;
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return 5;
		}
		panic("lf_findoverlap: default");
	}
	return 0;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = *sparelock;
	*sparelock = NULL;
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
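
/*
 * Worked example (illustrative numbers only): splitting lock2 = [20,29]
 * out of lock1 = [0,99] takes the three-piece path above and leaves the
 * list ordered by start offset:
 *
 *	before:	lock1 [0,99]
 *	after:	lock1 [0,19] -> lock2 [20,29] -> splitlock [30,99]
 *
 * The spare entry consumed here is the one preallocated by lf_advlock(),
 * which is why a split never has to allocate while the interlock is held.
 */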

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup(wakelock);
	}
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return 0;
}
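
/*
 * Worked example (illustrative numbers only): if the caller holds [0,9]
 * and [20,29] and clears [5,25], the loop above hits case 4 first
 * (shrink [0,9] to [0,4] and continue scanning), then case 5 ([20,29]
 * becomes [26,29]), leaving [0,4] and [26,29] held.  Only case 2, an
 * unlock that lands strictly inside a single lock, can consume the
 * preallocated sparelock via lf_split().
 */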

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return overlap;
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return NOLOCKF;
}
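
/*
 * Compatibility of overlapping locks from different owners, as tested
 * above (a restatement for reference, not new policy): two locks
 * conflict only if at least one of them is exclusive.
 *
 *	held \ requested	F_RDLCK		F_WRLCK
 *	F_RDLCK			compatible	blocks
 *	F_WRLCK			blocks		blocks
 */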

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    struct simplelock *interlock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return EAGAIN;
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
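		/*
		 * Illustrative cycle (invented numbers, not a real trace):
		 * process A holds [0,9] and sleeps waiting for [10,19];
		 * process B holds [10,19] and now requests [0,9].
		 * Following B's blocker to A's wait channel and back to
		 * B's own lock arrives at the requester, so the walk
		 * below returns EDEADLK instead of sleeping forever.
		 */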
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			__volatile const struct lockf *waitblock;
			int i = 0;

			/*
			 * The block is waiting on something.  lf_lwp will be
			 * 0 once the lock is granted, so we terminate the
			 * loop if we find this.
			 */
			wlwp = block->lf_lwp;
			while (wlwp && (i++ < maxlockdepth)) {
				waitblock = wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = waitblock->lf_lwp;
				if (wlwp == lock->lf_lwp) {
					lf_free(lock);
					return EDEADLK;
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				lf_free(lock);
				return EDEADLK;
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = ltsleep(lock, priority, lockstr, 0, interlock);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			lf_free(lock);
			return error;
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	lock->lf_lwp = 0;
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return 0;
}

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct proc *p = curproc;
	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	struct simplelock *interlock = &ap->a_vp->v_interlock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return EINVAL;
	}
	if (start < 0)
		return EINVAL;
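
	/*
	 * Example of the conversion (illustrative numbers): on a
	 * 1000-byte file, l_whence = SEEK_END with l_start = -10 gives
	 * start = 990; with l_len = 10 the lock covers [990,999], and
	 * l_len = 0 (end = -1 below) means "to end of file".
	 */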

	/*
	 * Allocate locks before acquiring the simple lock.
	 * We need two locks in the worst case.
	 */
	switch (ap->a_op) {
	case F_SETLK:
	case F_UNLCK:
		/*
		 * XXX for F_UNLCK case, we can re-use lock.
		 */
		if ((ap->a_flags & F_FLOCK) == 0) {
			/*
			 * byte-range lock might need one more lock.
			 */
			sparelock = lf_alloc(p->p_ucred->cr_uid, 0);
			if (sparelock == NULL) {
				error = ENOMEM;
				goto quit;
			}
			break;
		}
		/* FALLTHROUGH */

	case F_GETLK:
		sparelock = NULL;
		break;

	default:
		return EINVAL;
	}

	lock = lf_alloc(p->p_ucred->cr_uid, ap->a_op != F_UNLCK ? 1 : 2);
	if (lock == NULL) {
		error = ENOMEM;
		goto quit;
	}

	simple_lock(interlock);

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto quit_unlock;
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part.  This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;
	lock->lf_lwp = curlwp;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		error = lf_setlock(lock, &sparelock, interlock);
		lock = NULL; /* lf_setlock freed it */
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &sparelock);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		break;

	default:
		break;
		/* NOTREACHED */
	}

quit_unlock:
	simple_unlock(interlock);
quit:
	if (lock)
		lf_free(lock);
	if (sparelock)
		lf_free(sparelock);

	return error;
}
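/*
 * Usage sketch (illustrative userland code, not part of this file):
 * lf_advlock() is reached through each filesystem's VOP_ADVLOCK()
 * implementation when a process does byte-range locking with fcntl(2),
 * for example:
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;		exclusive lock
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 100;			lock bytes [0,99]
 *	if (fcntl(fd, F_SETLK, &fl) == -1)
 *		err(1, "fcntl");	fails with EAGAIN if another
 *					process holds a conflicting lock
 */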