/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <machine/cpu.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG
#define	COUNT(p, x)	if (p) (p)->p_locks += (x)
#else
#define	COUNT(p, x)
#endif
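
/*
 * COUNT() adjusts the per-process lock count under DEBUG: the code
 * below does COUNT(p, 1) when a lock is granted and COUNT(p, -1) when
 * one is released, so p->p_locks tracks how many locks p holds.
 */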

#if NCPUS > 1

/*
 * For a multiprocessor system, try spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 *
 * Note that the trailing "break" in PAUSE expands in the context of
 * the caller, so it exits the enclosing switch case in lockmgr() once
 * the wanted condition has cleared.
 */
int lock_wait_time = 100;
#define	PAUSE(lkp, wanted)						\
	if (lock_wait_time > 0) {					\
		int i;							\
									\
		simple_unlock(&lkp->lk_interlock);			\
		for (i = lock_wait_time; i > 0; i--)			\
			if (!(wanted))					\
				break;					\
		simple_lock(&lkp->lk_interlock);			\
	}								\
	if (!(wanted))							\
		break;


#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define	PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource: sleep until the "wanted" condition clears,
 * dropping the interlock across each tsleep() and retesting the
 * condition with the interlock reacquired.
 */
#define	ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
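
/*
 * For illustration (a sketch, not compiled code), the LK_SHARED case
 * below waits out exclusive activity with:
 *
 *	ACQUIRE(lkp, error, extflags, lkp->lk_flags &
 *	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
 *	if (error)
 *		break;
 *	lkp->lk_sharecount++;
 */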

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
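
/*
 * Example (hypothetical, for illustration; the priority and wait
 * message are the caller's choice):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplk", 0, 0);
 */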

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
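
/*
 * The return value is LK_EXCLUSIVE, LK_SHARED, or 0 if unlocked, so a
 * caller can assert its expectations, e.g. (hypothetical):
 *
 *	if (lockstatus(&lk) != LK_EXCLUSIVE)
 *		panic("example: lock not held");
 */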

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
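/*
 * Typical call patterns (hypothetical sketch; pass the containing
 * simplelock and the LK_INTERLOCK flag when one is held, or NULL and
 * no flag otherwise):
 *
 *	if ((error = lockmgr(&lk, LK_EXCLUSIVE, NULL, p)) != 0)
 *		return (error);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL, p);
 */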
int
lockmgr(lkp, flags, interlkp, p)
	__volatile struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int error;
	pid_t pid;
	int extflags;

	error = 0;
	if (p)
		pid = p->p_pid;
	else
		pid = LK_KERNPROC;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#ifdef DIAGNOSTIC
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    lkp->lk_lockholder != pid)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}
#endif /* DIAGNOSTIC */

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive
			 * lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested the upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		}
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if ((error = tsleep((void *)&lkp->lk_flags,
			    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo)) != 0)
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
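
/*
 * A drain request is typically the prelude to decommissioning a lock,
 * e.g. (hypothetical sketch):
 *
 *	lockmgr(&lk, LK_DRAIN, NULL, p);
 *	... tear down the object containing the lock ...
 *	lockmgr(&lk, LK_RELEASE, NULL, p);
 *
 * Under DIAGNOSTIC, the final release marks the lock LK_DRAINED unless
 * LK_REENABLE is specified.
 */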

/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
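
/*
 * Sample output (illustrative only), for a shared lock whose wait
 * message is "inode", with two holders and one waiter:
 *
 *	 lock type inode: SHARED (count 2) with 1 pending
 */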

#if defined(DEBUG) && NCPUS == 1
#include <sys/kernel.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
int lockpausetime = 0;
struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
int simplelockrecurse;
/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{

	alp->lock_data = 0;
}

void
_simple_lock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_lock: lock held", id, l);
		printf("%s:%d: simple_lock: lock held\n", id, l);
		if (lockpausetime == 1) {
			BACKTRACE(curproc);
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_lock: lock held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}
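
/*
 * Callers normally reach these through the simple_lock()/simple_unlock()
 * macros (which presumably supply __FILE__ and __LINE__ as "id" and "l"),
 * e.g. (hypothetical):
 *
 *	if (simple_lock_try(&slp)) {
 *		... brief critical section ...
 *		simple_unlock(&slp);
 *	}
 */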

void
_simple_unlock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_unlock: lock not held", id, l);
		printf("%s:%d: simple_unlock: lock not held\n", id, l);
		if (lockpausetime == 1) {
			BACKTRACE(curproc);
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_unlock: lock not held...",
			    id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 0;
	if (curproc)
		curproc->p_simple_locks--;
}
#endif /* DEBUG && NCPUS == 1 */