/*	$NetBSD: kern_lock.c,v 1.6 1998/03/01 02:22:29 fvdl Exp $	*/

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef LOCKDEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
	if (lock_wait_time > 0) {					\
		int i;							\
									\
		simple_unlock(&lkp->lk_interlock);			\
		for (i = lock_wait_time; i > 0; i--)			\
			if (!(wanted))					\
				break;					\
		simple_lock(&lkp->lk_interlock);			\
	}								\
	if (!(wanted))							\
		break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}

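/*
 * Illustration only (not part of the implementation): a use such as
 *
 *	ACQUIRE(lkp, error, extflags, lkp->lk_flags & LK_HAVE_EXCL);
 *
 * first spins for up to lock_wait_time iterations waiting for
 * LK_HAVE_EXCL to clear (multiprocessor only), then falls back to
 * tsleep() on the lock.  The interlock is dropped around both the
 * spin and the sleep so the current holder can get in to release
 * the lock and wake us.
 */
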
/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}

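/*
 * Example, illustrative only (never compiled; "foo_lock" and
 * "foo_init" are hypothetical names, not part of this file): a
 * subsystem allocates a struct lock and calls lockinit() once
 * before any lockmgr() operation on it.
 */
#if 0
struct lock foo_lock;

void
foo_init()
{

	/* Sleep at PRIBIO with wait message "foolk"; no timeout, no flags. */
	lockinit(&foo_lock, PRIBIO, "foolk", 0, 0);
}
#endif
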
/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}

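/*
 * Example, illustrative only (never compiled): lockstatus() returns
 * LK_EXCLUSIVE, LK_SHARED, or 0 when the lock is not held, so a caller
 * can sanity-check its own state.  "foo_lock" is the hypothetical lock
 * from the sketch above.
 */
#if 0
	if (lockstatus(&foo_lock) != LK_EXCLUSIVE)
		panic("foo: lock not exclusively held");
#endif
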
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp)
	__volatile struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
{
	int error;
	pid_t pid;
	int extflags;
	struct proc *p = curproc;

	error = 0;
	if (p)
		pid = p->p_pid;
	else
		pid = LK_KERNPROC;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#ifdef DIAGNOSTIC
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned.  The only valid operation thereafter
	 * is a single release of that exclusive lock.  This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag.  Any
	 * further requests of any sort will result in a panic.  The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    lkp->lk_lockholder != pid)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}
#endif /* DIAGNOSTIC */

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us in requesting an
		 * upgrade, fail rather than allow an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for
			 * the shared count to drop to zero, then take
			 * the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our
		 * shared lock, awaken the upgrade requestor if we are
		 * the last shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		}
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if ((error = tsleep((void *)&lkp->lk_flags,
			    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo)))
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}

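/*
 * Example, illustrative only (never compiled; "foo_lock" is the
 * hypothetical lock from the lockinit() sketch above): a reader takes
 * and drops a shared lock, and a writer polls with LK_NOWAIT before
 * committing to sleep for exclusive access.  No interlock is passed,
 * so LK_INTERLOCK is not set.
 */
#if 0
	int error;

	/* Reader. */
	error = lockmgr(&foo_lock, LK_SHARED, NULL);
	if (error == 0) {
		/* ... examine the protected data ... */
		lockmgr(&foo_lock, LK_RELEASE, NULL);
	}

	/* Writer: poll first, then block if busy. */
	error = lockmgr(&foo_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL);
	if (error == EBUSY)
		error = lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
	if (error == 0) {
		/* ... modify the protected data ... */
		lockmgr(&foo_lock, LK_RELEASE, NULL);
	}
#endif
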
/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

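/*
 * For instance (hypothetical values), a lock with wait message "inode",
 * two shared holders, and one waiter would print:
 *
 *	 lock type inode: SHARED (count 2) with 1 pending
 */
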
#if defined(LOCKDEBUG) && NCPUS == 1
#include <sys/kernel.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
int lockpausetime = 0;
struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
int simplelockrecurse;
/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{
	alp->lock_data = 0;
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
}

void
_simple_lock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		printf("simple_lock: lock held\n");
		printf("currently at: %s:%d\n", id, l);
		printf("last locked: %s:%d\n",
		    alp->lock_file, alp->lock_line);
		printf("last unlocked: %s:%d\n",
		    alp->unlock_file, alp->unlock_line);
		if (lockpausetime == -1)
			panic("simple_lock: lock held");
		if (lockpausetime == 1) {
#ifdef BACKTRACE
			BACKTRACE(curproc);
#endif
		} else if (lockpausetime > 1) {
			printf("simple_lock: lock held, pausing...");
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 1;
	alp->lock_file = id;
	alp->lock_line = l;
	if (curproc)
		curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	alp->lock_file = id;
	alp->lock_line = l;
	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}

void
_simple_unlock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		printf("simple_unlock: lock not held\n");
		printf("currently at: %s:%d\n", id, l);
		printf("last locked: %s:%d\n",
		    alp->lock_file, alp->lock_line);
		printf("last unlocked: %s:%d\n",
		    alp->unlock_file, alp->unlock_line);
		if (lockpausetime == -1)
			panic("simple_unlock: lock not held");
		if (lockpausetime == 1) {
#ifdef BACKTRACE
			BACKTRACE(curproc);
#endif
		} else if (lockpausetime > 1) {
			printf("simple_unlock: lock not held, pausing...");
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 0;
	alp->unlock_file = id;
	alp->unlock_line = l;
	if (curproc)
		curproc->p_simple_locks--;
}
#endif /* LOCKDEBUG && NCPUS == 1 */
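
/*
 * Example, illustrative only (never compiled): with LOCKDEBUG, the
 * simple_lock() macros in <sys/lock.h> are expected to supply the
 * caller's file and line, so a double acquisition reports both call
 * sites rather than silently wedging.  "foo_slock" and the function
 * name are hypothetical.
 */
#if 0
static struct simplelock foo_slock;

void
foo_slock_example()
{

	simple_lock_init(&foo_slock);
	simple_lock(&foo_slock);	/* records this file and line */
	simple_lock(&foo_slock);	/* prints "simple_lock: lock held"
					 * and both call sites */
	simple_unlock(&foo_slock);
}
#endif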