/*	$NetBSD: vfs_lockf.c,v 1.50 2006/05/20 12:02:47 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.50 2006/05/20 12:02:47 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/kauth.h>

/*
 * The lockf structure is a kernel structure which contains the information
 * associated with a byte range lock.  The lockf structures are linked into
 * the inode structure.  Locks are sorted by the starting byte of the lock for
 * efficiency.
 *
 * lf_next is used for two purposes, depending on whether the lock is
 * being held, or is in conflict with an existing lock.  If this lock
 * is held, it indicates the next lock on the same vnode.
 * For pending locks, if lock->lf_next is non-NULL, then lock->lf_block
 * must be queued on the lf_blkhd TAILQ of lock->lf_next.
 */

TAILQ_HEAD(locklist, lockf);

struct lockf {
	short	lf_flags;	 /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
	short	lf_type;	 /* Lock type: F_RDLCK, F_WRLCK */
	off_t	lf_start;	 /* The byte # of the start of the lock */
	off_t	lf_end;		 /* The byte # of the end of the lock (-1=EOF)*/
	void	*lf_id;		 /* process or file description holding lock */
	struct	lwp *lf_lwp;	 /* LWP waiting for lock */
	struct	lockf **lf_head; /* Back pointer to the head of lockf list */
	struct	lockf *lf_next;	 /* Next lock on this vnode, or blocking lock */
	struct	locklist lf_blkhd; /* List of requests blocked on this lock */
	TAILQ_ENTRY(lockf) lf_block; /* A request waiting for a lock */
	uid_t	lf_uid;		 /* User ID responsible */
};
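
/*
 * Roughly, for a vnode with two granted locks A and B and one pending
 * request R that is blocked by A, the lists look like this (illustrative
 * sketch only):
 *
 *	*head -> A -> B -> NULL		(granted locks, sorted by lf_start)
 *	A's lf_blkhd: { R }		(R linked through R's lf_block entry)
 *	R's lf_next -> A		(the lock R is waiting behind)
 *
 * When A is unlocked or shrunk, lf_wakelock() drains A's lf_blkhd and each
 * waiter retries its request in lf_setlock().
 */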

/* Maximum length of sleep chains to traverse to try and detect deadlock. */
#define MAXDEPTH 50

POOL_INIT(lockfpool, sizeof(struct lockf), 0, 0, 0, "lockfpl",
    &pool_allocator_nointr);

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file %p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file %p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file %p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */

/*
 * 3 options for allowfail.
 * 0 - always allocate.  1 - cutoff at limit.  2 - cutoff at double limit.
 */
static struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;
	int s;

	uip = uid_find(uid);
	UILOCK(uip, s);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		UIUNLOCK(uip, s);
		return NULL;
	}
	uip->ui_lockcnt++;
	UIUNLOCK(uip, s);
	lock = pool_get(&lockfpool, PR_WAITOK);
	lock->lf_uid = uid;
	return lock;
}

static void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;
	int s;

	uip = uid_find(lock->lf_uid);
	UILOCK(uip, s);
	uip->ui_lockcnt--;
	UIUNLOCK(uip, s);
	pool_put(&lockfpool, lock);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
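		/*
		 * An illustrative picture of the cases, with "lock" the
		 * incoming request and "lf" the list entry being checked
		 * (ranges are [lf_start,lf_end], -1 meaning "to EOF"):
		 *
		 *	1:  lock  |-----|        2:  lock    |--|
		 *	    lf    |-----|            lf    |------|
		 *
		 *	3:  lock  |------|       4:  lock     |-----|
		 *	    lf      |--|             lf    |-----|
		 *
		 *	5:  lock  |-----|
		 *	    lf       |-----|
		 *
		 * Case 0 is the two ranges not touching at all.
		 */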
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return 0;
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return 1;
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return 2;
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return 3;
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return 4;
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return 5;
		}
		panic("lf_findoverlap: default");
	}
	return 0;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = *sparelock;
	*sparelock = NULL;
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
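
/*
 * For example (illustrative numbers): with lock1 = [0,99] held and
 * lock2 = [40,59] carved out of the middle, lf_split() shrinks lock1 to
 * [0,39], turns *sparelock into the tail piece [60,99], and links the
 * three as lock1 -> lock2 -> tail on the vnode's list.
 */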

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup(wakelock);
	}
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return overlap;
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return NOLOCKF;
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    struct simplelock *interlock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return EAGAIN;
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
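		/*
		 * The chain walk below: block->lf_lwp names the LWP to
		 * blame; that LWP's wait channel is the pending lockf it
		 * sleeps on, and that lockf's lf_next is the lock blocking
		 * it in turn.  If the chain comes back to our own LWP
		 * within maxlockdepth steps, granting this lock would
		 * complete a cycle, so we fail with EDEADLK.
		 */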
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			volatile const struct lockf *waitblock;
			int i = 0;

			/*
			 * The block is waiting on something.  lf_lwp will be
			 * 0 once the lock is granted, so we terminate the
			 * loop if we find this.
			 */
			wlwp = block->lf_lwp;
			while (wlwp && (i++ < maxlockdepth)) {
				waitblock = wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = waitblock->lf_lwp;
				if (wlwp == lock->lf_lwp) {
					lf_free(lock);
					return EDEADLK;
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				lf_free(lock);
				return EDEADLK;
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = ltsleep(lock, priority, lockstr, 0, interlock);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			lf_free(lock);
			return error;
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	lock->lf_lwp = 0;
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return 0;
}

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct proc *p = curproc;
	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	struct simplelock *interlock = &ap->a_vp->v_interlock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return EINVAL;
	}
	if (start < 0)
		return EINVAL;

	/*
	 * Allocate locks before acquiring the simple lock.
	 * We need two locks in the worst case.
	 */
	switch (ap->a_op) {
	case F_SETLK:
	case F_UNLCK:
		/*
		 * XXX for F_UNLCK case, we can re-use lock.
		 */
		if ((ap->a_flags & F_FLOCK) == 0) {
			/*
			 * byte-range lock might need one more lock.
			 */
			sparelock = lf_alloc(kauth_cred_geteuid(p->p_cred), 0);
			if (sparelock == NULL) {
				error = ENOMEM;
				goto quit;
			}
			break;
		}
		/* FALLTHROUGH */

	case F_GETLK:
		sparelock = NULL;
		break;

	default:
		return EINVAL;
	}

	lock = lf_alloc(kauth_cred_geteuid(p->p_cred), ap->a_op != F_UNLCK ? 1 : 2);
	if (lock == NULL) {
		error = ENOMEM;
		goto quit;
	}

	simple_lock(interlock);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto quit_unlock;
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part. This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;
	lock->lf_lwp = curlwp;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		error = lf_setlock(lock, &sparelock, interlock);
		lock = NULL; /* lf_setlock freed it */
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &sparelock);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		break;

	default:
		break;
		/* NOTREACHED */
	}

quit_unlock:
	simple_unlock(interlock);
quit:
	if (lock)
		lf_free(lock);
	if (sparelock)
		lf_free(sparelock);

	return error;
}
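
/*
 * Typical use (sketch): a file system's VOP_ADVLOCK implementation just
 * forwards to lf_advlock() with its per-inode lock list head and current
 * file size, e.g. roughly what ufs_advlock() does:
 *
 *	int
 *	ufs_advlock(void *v)
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return lf_advlock(ap, &ip->i_lockf, ip->i_size);
 *	}
 */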