/*	$NetBSD: vfs_lockf.c,v 1.60 2007/07/09 21:10:57 ad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.60 2007/07/09 21:10:57 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/kauth.h>

/*
 * The lockf structure is a kernel structure which contains the information
 * associated with a byte range lock.  The lockf structures are linked into
 * the vnode structure.  Locks are sorted by the starting byte of the lock for
 * efficiency.
 *
 * lf_next is used for two purposes, depending on whether the lock is
 * being held, or is in conflict with an existing lock.  If this lock
 * is held, it indicates the next lock on the same vnode.
 * For pending locks, if lock->lf_next is non-NULL, then lock->lf_block
 * must be queued on the lf_blkhd TAILQ of lock->lf_next.
 */
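/*
 * Example (a sketch of the layout, not code): with three held locks on a
 * vnode covering bytes [0..9], [20..29] and [40..EOF], *lf_head points at
 * the [0..9] lock, whose lf_next points at [20..29], and so on in start
 * order.  A pending request conflicting with [20..29] is queued on that
 * lock's lf_blkhd list via its lf_block entry, with its own lf_next
 * pointing back at the [20..29] lock it is waiting on.
 */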

TAILQ_HEAD(locklist, lockf);

struct lockf {
	short	lf_flags;	 /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
	short	lf_type;	 /* Lock type: F_RDLCK, F_WRLCK */
	off_t	lf_start;	 /* The byte # of the start of the lock */
	off_t	lf_end;		 /* The byte # of the end of the lock (-1=EOF)*/
	void	*lf_id;		 /* process or file description holding lock */
	struct	lockf **lf_head; /* Back pointer to the head of lockf list */
	struct	lockf *lf_next;	 /* Next lock on this vnode, or blocking lock */
	struct	locklist lf_blkhd; /* List of requests blocked on this lock */
	TAILQ_ENTRY(lockf) lf_block; /* A request waiting for a lock */
	uid_t	lf_uid;		 /* User ID responsible */
};

/* Maximum length of sleep chains to traverse to try and detect deadlock. */
#define MAXDEPTH 50

static POOL_INIT(lockfpool, sizeof(struct lockf), 0, 0, 0, "lockfpl",
    &pool_allocator_nointr, IPL_NONE);

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

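/*
 * Owner scope selectors for lf_findoverlap(): consider only locks held by
 * the requesting owner (SELF), or only locks held by other owners (OTHERS).
 */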
#define SELF	0x1
#define OTHERS	0x2

/*
 * XXX TODO
 * Misc cleanups: "void *id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(const char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file %p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(const char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file %p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file %p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */

/*
 * 3 options for allowfail.
 * 0 - always allocate. 1 - cutoff at limit. 2 - cutoff at double limit.
 */
static struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	mutex_enter(&uip->ui_lock);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		mutex_exit(&uip->ui_lock);
		return NULL;
	}
	uip->ui_lockcnt++;
	mutex_exit(&uip->ui_lock);
	lock = pool_get(&lockfpool, PR_WAITOK);
	lock->lf_uid = uid;
	return lock;
}

static void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	uip = uid_find(lock->lf_uid);
	mutex_enter(&uip->ui_lock);
	uip->ui_lockcnt--;
	mutex_exit(&uip->ui_lock);
	pool_put(&lockfpool, lock);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NULL)
		return 0;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return 0;
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return 1;
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return 2;
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return 3;
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return 4;
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return 5;
		}
		panic("lf_findoverlap: default");
	}
	return 0;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
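/*
 * The caller preallocates *sparelock for the three-piece case; when that
 * case is taken, lf_split() consumes it and resets *sparelock to NULL.
 */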
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = *sparelock;
	*sparelock = NULL;
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup(wakelock);
	}
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
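/*
 * If the unlock carves the middle out of an existing lock, the extra
 * lockf record for the resulting three-way split comes from *sparelock,
 * preallocated by the caller; see lf_split().
 */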
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NULL)
		return 0;
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return overlap;
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return NULL;
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    struct simplelock *interlock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
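	/*
	 * Write locks sleep at PLOCK + 4 rather than PLOCK; PCATCH makes
	 * the ltsleep() below interruptible by signals.
	 */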
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return EAGAIN;
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			volatile const struct lockf *waitblock;
			int i = 0;
			struct proc *p;

			p = (struct proc *)block->lf_id;
			KASSERT(p != NULL);
			while (i++ < maxlockdepth) {
				mutex_enter(&p->p_smutex);
				if (p->p_nlwps > 1) {
					mutex_exit(&p->p_smutex);
					break;
				}
				wlwp = LIST_FIRST(&p->p_lwps);
				lwp_lock(wlwp);
				if (wlwp->l_wmesg != lockstr) {
					lwp_unlock(wlwp);
					mutex_exit(&p->p_smutex);
					break;
				}
				waitblock = wlwp->l_wchan;
				lwp_unlock(wlwp);
				mutex_exit(&p->p_smutex);
				if (waitblock == NULL) {
					/*
					 * this lwp just got up but
					 * not returned from ltsleep yet.
					 */
					break;
				}
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				p = (struct proc *)waitblock->lf_id;
				if (p == curproc) {
					lf_free(lock);
					return EDEADLK;
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				lf_free(lock);
				return EDEADLK;
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = ltsleep(lock, priority, lockstr, 0, interlock);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NULL).
		 */
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			lf_free(lock);
			return error;
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return 0;
}

/*
 * Do an advisory lock operation.
 */
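/*
 * Typical use (a sketch, assuming a UFS-like inode that keeps its lock
 * list in i_lockf and its size in i_size): a filesystem's VOP_ADVLOCK
 * implementation simply forwards here with the head of its per-vnode
 * lock list, e.g.
 *
 *	return lf_advlock(ap, &ip->i_lockf, ip->i_size);
 */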
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct lwp *l = curlwp;
	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	struct simplelock *interlock = &ap->a_vp->v_interlock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return EINVAL;
	}
	if (start < 0)
		return EINVAL;

	/*
	 * Allocate locks before acquiring the simple lock.  We need two
	 * locks in the worst case.
	 */
	switch (ap->a_op) {
	case F_SETLK:
	case F_UNLCK:
		/*
		 * XXX For F_UNLCK case, we can re-use the lock.
		 */
		if ((ap->a_flags & F_FLOCK) == 0) {
			/*
			 * Byte-range lock might need one more lock.
			 */
			sparelock = lf_alloc(kauth_cred_geteuid(l->l_cred), 0);
			if (sparelock == NULL) {
				error = ENOMEM;
				goto quit;
			}
			break;
		}
		/* FALLTHROUGH */

	case F_GETLK:
		sparelock = NULL;
		break;

	default:
		return EINVAL;
	}

	lock = lf_alloc(kauth_cred_geteuid(l->l_cred),
	    ap->a_op != F_UNLCK ? 1 : 2);
	if (lock == NULL) {
		error = ENOMEM;
		goto quit;
	}

	simple_lock(interlock);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto quit_unlock;
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part. This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		error = lf_setlock(lock, &sparelock, interlock);
		lock = NULL; /* lf_setlock freed it */
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &sparelock);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		break;

	default:
		break;
		/* NOTREACHED */
	}

quit_unlock:
	simple_unlock(interlock);
quit:
	if (lock)
		lf_free(lock);
	if (sparelock)
		lf_free(sparelock);

	return error;
}