/*	$NetBSD: vfs_lockf.c,v 1.37 2005/02/26 21:34:56 perry Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.37 2005/02/26 21:34:56 perry Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

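/* Pool from which struct lockf entries are allocated and freed. */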
POOL_INIT(lockfpool, sizeof(struct lockf), 0, 0, 0, "lockfpl",
    &pool_allocator_nointr);

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

static int lf_clearlock(struct lockf *, struct lockf **);
static int lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *);
static int lf_getlock(struct lockf *, struct flock *);
static int lf_setlock(struct lockf *, struct lockf **, struct simplelock *);
static void lf_split(struct lockf *, struct lockf *, struct lockf **);
static void lf_wakelock(struct lockf *);

#ifdef LOCKF_DEBUG
static void lf_print(char *, struct lockf *);
static void lf_printlist(char *, struct lockf *);
#endif

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	struct simplelock *interlock = &ap->a_vp->v_interlock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return EINVAL;
	}
	if (start < 0)
		return EINVAL;

	/*
	 * Allocate locks before acquiring the simple lock.
	 * We need two locks in the worst case.
	 */
	switch (ap->a_op) {
	case F_SETLK:
	case F_UNLCK:
		/*
		 * XXX for F_UNLCK case, we can re-use lock.
		 */
		if ((fl->l_type & F_FLOCK) == 0) {
			/*
			 * byte-range lock might need one more lock.
			 */
			sparelock = pool_get(&lockfpool, PR_WAITOK);
			if (sparelock == NULL) {
				error = ENOMEM;
				goto quit;
			}
			break;
		}
		/* FALLTHROUGH */

	case F_GETLK:
		sparelock = NULL;
		break;

	default:
		return EINVAL;
	}

	lock = pool_get(&lockfpool, PR_WAITOK);
	if (lock == NULL) {
		error = ENOMEM;
		goto quit;
	}

	simple_lock(interlock);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto quit_unlock;
		}
	}

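	/*
	 * An l_len of 0 means the lock extends to the end of the file;
	 * that is recorded below as an end offset of -1.
	 */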
	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part. This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
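	/*
	 * lf_lwp is used by the deadlock detection in lf_setlock() and is
	 * cleared once the lock has been granted.
	 */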
	lock->lf_id = (struct proc *)ap->a_id;
	lock->lf_lwp = curlwp;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		error = lf_setlock(lock, &sparelock, interlock);
		lock = NULL; /* lf_setlock freed it */
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &sparelock);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		break;

	default:
		break;
		/* NOTREACHED */
	}

quit_unlock:
	simple_unlock(interlock);
quit:
	if (lock)
		pool_put(&lockfpool, lock);
	if (sparelock)
		pool_put(&lockfpool, sparelock);

	return error;
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    struct simplelock *interlock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
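	/* PCATCH makes the ltsleep() below interruptible by signals. */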
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			pool_put(&lockfpool, lock);
			return EAGAIN;
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			struct lockf *waitblock;
			int i = 0;

			/*
			 * The block is waiting on something.  lf_lwp will be
			 * 0 once the lock is granted, so we terminate the
			 * loop if we find this.
			 */
			wlwp = block->lf_lwp;
			while (wlwp && (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = waitblock->lf_lwp;
				if (wlwp == lock->lf_lwp) {
					pool_put(&lockfpool, lock);
					return EDEADLK;
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				pool_put(&lockfpool, lock);
				return EDEADLK;
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = ltsleep(lock, priority, lockstr, 0, interlock);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			pool_put(&lockfpool, lock);
			return error;
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
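	/* The lock is being granted; clear lf_lwp (see the deadlock check above). */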
	lock->lf_lwp = 0;
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			pool_put(&lockfpool, lock);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				pool_put(&lockfpool, lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			pool_put(&lockfpool, overlap);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			pool_put(&lockfpool, overlap);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			pool_put(&lockfpool, overlap);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return 0;
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return overlap;
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return NOLOCKF;
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
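			/*
			 * A process's own locks are kept sorted by start
			 * offset, so once we are past the end of our range
			 * no later lock of ours can overlap.
			 */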
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return 0;
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return 1;
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return 2;
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return 3;
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return 4;
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return 5;
		}
		panic("lf_findoverlap: default");
	}
	return 0;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
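	/*
	 * The spare lock was pre-allocated in lf_advlock() for byte-range
	 * requests, so a three-way split does not have to allocate here.
	 */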
	splitlock = *sparelock;
	*sparelock = NULL;
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file 0x%p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file 0x%p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file 0x%p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */