/*	$NetBSD: kern_rwlock.c,v 1.51 2018/08/14 01:09:53 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel reader/writer lock implementation, modeled after those
 * found in Solaris, a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	Richard McDougall.
 */
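
/*
 * Typical consumer usage, for orientation (an illustrative sketch;
 * rwlock(9) is the authoritative reference):
 *
 *	krwlock_t lock;
 *
 *	rw_init(&lock);
 *	rw_enter(&lock, RW_READER);
 *	...
 *	rw_exit(&lock);
 *	rw_enter(&lock, RW_WRITER);
 *	...
 *	rw_exit(&lock);
 *	rw_destroy(&lock);
 */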

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.51 2018/08/14 01:09:53 ozaki-r Exp $");

#define	__RWLOCK_PRIVATE

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/pserialize.h>

#include <dev/lockstat.h>

/*
 * LOCKDEBUG
 */

#if defined(LOCKDEBUG)

#define	RW_WANTLOCK(rw, op)						\
	LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw),			\
	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
#define	RW_LOCKED(rw, op)						\
	LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw), NULL,			\
	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
#define	RW_UNLOCKED(rw, op)						\
	LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw),			\
	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
#define	RW_DASSERT(rw, cond)						\
do {									\
	if (!(cond))							\
		rw_abort(__func__, __LINE__, rw, "assertion failed: " #cond);\
} while (/* CONSTCOND */ 0);

#else	/* LOCKDEBUG */

#define	RW_WANTLOCK(rw, op)	/* nothing */
#define	RW_LOCKED(rw, op)	/* nothing */
#define	RW_UNLOCKED(rw, op)	/* nothing */
#define	RW_DASSERT(rw, cond)	/* nothing */

#endif	/* LOCKDEBUG */

/*
 * DIAGNOSTIC
 */

#if defined(DIAGNOSTIC)

#define	RW_ASSERT(rw, cond)						\
do {									\
	if (!(cond))							\
		rw_abort(__func__, __LINE__, rw, "assertion failed: " #cond);\
} while (/* CONSTCOND */ 0)

#else

#define	RW_ASSERT(rw, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

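/*
 * Debug state lives in the lock word itself: RW_NODEBUG set means
 * LOCKDEBUG is not tracking this lock.  RW_SETDEBUG() therefore sets
 * the bit when tracking is off, and RW_DEBUG_P() is true when the
 * bit is clear.
 */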
#define	RW_SETDEBUG(rw, on)	((rw)->rw_owner |= (on) ? 0 : RW_NODEBUG)
#define	RW_DEBUG_P(rw)		(((rw)->rw_owner & RW_NODEBUG) == 0)
#if defined(LOCKDEBUG)
#define	RW_INHERITDEBUG(n, o)	(n) |= (o) & RW_NODEBUG
#else /* defined(LOCKDEBUG) */
#define	RW_INHERITDEBUG(n, o)	/* nothing */
#endif /* defined(LOCKDEBUG) */

static void	rw_abort(const char *, size_t, krwlock_t *, const char *);
static void	rw_dump(const volatile void *);
static lwp_t	*rw_owner(wchan_t);

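/*
 * rw_cas:
 *
 *	Compare-and-swap on the lock word, carrying the RW_NODEBUG bit
 *	over from the old value under LOCKDEBUG.  Returns the value
 *	observed in rw_owner, which equals 'o' only if the swap
 *	succeeded.
 */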
static inline uintptr_t
rw_cas(krwlock_t *rw, uintptr_t o, uintptr_t n)
{

	RW_INHERITDEBUG(n, o);
	return (uintptr_t)atomic_cas_ptr((volatile void *)&rw->rw_owner,
	    (void *)o, (void *)n);
}

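/*
 * rw_swap:
 *
 *	Unconditionally swap in a new lock word.  Callers hold the
 *	turnstile chain lock, so the value cannot change underneath
 *	us; the old value is asserted to match 'o'.
 */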
static inline void
rw_swap(krwlock_t *rw, uintptr_t o, uintptr_t n)
{

	RW_INHERITDEBUG(n, o);
	n = (uintptr_t)atomic_swap_ptr((volatile void *)&rw->rw_owner,
	    (void *)n);
	RW_DASSERT(rw, n == o);
}

/*
 * For platforms that do not provide stubs, or for the LOCKDEBUG case.
 */
#ifdef LOCKDEBUG
#undef	__HAVE_RW_STUBS
#endif

#ifndef __HAVE_RW_STUBS
__strong_alias(rw_enter,rw_vector_enter);
__strong_alias(rw_exit,rw_vector_exit);
__strong_alias(rw_tryenter,rw_vector_tryenter);
#endif

lockops_t rwlock_lockops = {
	.lo_name = "Reader / writer lock",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = rw_dump,
};

syncobj_t rw_syncobj = {
	.sobj_flag	= SOBJ_SLEEPQ_SORTED,
	.sobj_unsleep	= turnstile_unsleep,
	.sobj_changepri	= turnstile_changepri,
	.sobj_lendpri	= sleepq_lendpri,
	.sobj_owner	= rw_owner,
};

/*
 * rw_dump:
 *
 *	Dump the contents of a rwlock structure.
 */
static void
rw_dump(const volatile void *cookie)
{
	const volatile krwlock_t *rw = cookie;

	printf_nolog("owner/count  : %#018lx flags    : %#018x\n",
	    (long)RW_OWNER(rw), (int)RW_FLAGS(rw));
}

/*
 * rw_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler not to inline it.
 */
static void __noinline
rw_abort(const char *func, size_t line, krwlock_t *rw, const char *msg)
{

	if (panicstr != NULL)
		return;

	LOCKDEBUG_ABORT(func, line, rw, &rwlock_lockops, msg);
}

/*
 * rw_init:
 *
 *	Initialize a rwlock for use.
 */
void	_rw_init(krwlock_t *, uintptr_t);
void
_rw_init(krwlock_t *rw, uintptr_t return_address)
{
	bool dodebug;

	memset(rw, 0, sizeof(*rw));

	dodebug = LOCKDEBUG_ALLOC(rw, &rwlock_lockops, return_address);
	RW_SETDEBUG(rw, dodebug);
}

void
rw_init(krwlock_t *rw)
{

	_rw_init(rw, (uintptr_t)__builtin_return_address(0));
}

/*
 * rw_destroy:
 *
 *	Tear down a rwlock.
 */
void
rw_destroy(krwlock_t *rw)
{

	RW_ASSERT(rw, (rw->rw_owner & ~RW_NODEBUG) == 0);
	LOCKDEBUG_FREE(RW_DEBUG_P(rw), rw);
}

/*
 * rw_oncpu:
 *
 *	Return true if an rwlock owner is running on a CPU in the system.
 *	If the target is waiting on the kernel big lock, then we must
 *	release it.  This is necessary to avoid deadlock.
 */
static bool
rw_oncpu(uintptr_t owner)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *ci;
	lwp_t *l;

	KASSERT(kpreempt_disabled());

	if ((owner & (RW_WRITE_LOCKED|RW_HAS_WAITERS)) != RW_WRITE_LOCKED) {
		return false;
	}

	/*
	 * See lwp_dtor() for why dereferencing the LWP pointer is safe.
	 * We must have kernel preemption disabled for that.
	 */
	l = (lwp_t *)(owner & RW_THREAD);
	ci = l->l_cpu;

	if (ci && ci->ci_curlwp == l) {
		/* Target is running; do we need to block? */
		return (ci->ci_biglock_wanted != l);
	}
#endif
	/* Not running.  It may be safe to block now. */
	return false;
}

/*
 * rw_vector_enter:
 *
 *	Acquire a rwlock.
 */
void
rw_vector_enter(krwlock_t *rw, const krw_t op)
{
	uintptr_t owner, incr, need_wait, set_wait, curthread, next;
	turnstile_t *ts;
	int queue;
	lwp_t *l;
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_FLAG(lsflag);

	l = curlwp;
	curthread = (uintptr_t)l;

	RW_ASSERT(rw, !cpu_intr_p());
	RW_ASSERT(rw, curthread != 0);
	RW_WANTLOCK(rw, op);
	KDASSERT(pserialize_not_in_read_section());

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	/*
	 * We play a slight trick here.  If we're a reader, we want to
	 * increment the read count.  If we're a writer, we want to
	 * set the owner field and the WRITE_LOCKED bit.
	 *
	 * In the latter case, we expect those bits to be zero,
	 * therefore we can use an add operation to set them, which
	 * means an add operation for both cases.
	 */
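	/*
	 * Worked example (illustrative values): starting from a free
	 * lock (rw_owner == 0), a reader computes 0 + RW_READ_INCR,
	 * giving a hold count of one; a second reader makes it two.
	 * A writer computes 0 + (curthread | RW_WRITE_LOCKED); LWP
	 * pointers are aligned, so the low bits of the word are free
	 * to carry the flags.
	 */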
	if (__predict_true(op == RW_READER)) {
		incr = RW_READ_INCR;
		set_wait = RW_HAS_WAITERS;
		need_wait = RW_WRITE_LOCKED | RW_WRITE_WANTED;
		queue = TS_READER_Q;
	} else {
		RW_DASSERT(rw, op == RW_WRITER);
		incr = curthread | RW_WRITE_LOCKED;
		set_wait = RW_HAS_WAITERS | RW_WRITE_WANTED;
		need_wait = RW_WRITE_LOCKED | RW_THREAD;
		queue = TS_WRITER_Q;
	}

	LOCKSTAT_ENTER(lsflag);

	KPREEMPT_DISABLE(curlwp);
	for (owner = rw->rw_owner; ;) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & need_wait) == 0) {
			next = rw_cas(rw, owner, (owner + incr) &
			    ~RW_WRITE_WANTED);
			if (__predict_true(next == owner)) {
				/* Got it! */
				membar_enter();
				break;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			owner = next;
			continue;
		}
		if (__predict_false(panicstr != NULL)) {
			KPREEMPT_ENABLE(curlwp);
			return;
		}
		if (__predict_false(RW_OWNER(rw) == curthread)) {
			rw_abort(__func__, __LINE__, rw,
			    "locking against myself");
		}
		/*
		 * If the lock owner is running on another CPU, and
		 * there are no existing waiters, then spin.
		 */
		if (rw_oncpu(owner)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			u_int count = SPINLOCK_BACKOFF_MIN;
			do {
				KPREEMPT_ENABLE(curlwp);
				SPINLOCK_BACKOFF(count);
				KPREEMPT_DISABLE(curlwp);
				owner = rw->rw_owner;
			} while (rw_oncpu(owner));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if ((owner & need_wait) == 0)
				continue;
		}

		/*
		 * Grab the turnstile chain lock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		ts = turnstile_lookup(rw);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 * Reload rw_owner because turnstile_lookup() may have
		 * spun on the turnstile chain lock.
		 */
		owner = rw->rw_owner;
		if ((owner & need_wait) == 0 || rw_oncpu(owner)) {
			turnstile_exit(rw);
			continue;
		}
		next = rw_cas(rw, owner, owner | set_wait);
		if (__predict_false(next != owner)) {
			turnstile_exit(rw);
			owner = next;
			continue;
		}

		LOCKSTAT_START_TIMER(lsflag, slptime);
		turnstile_block(ts, queue, rw, &rw_syncobj);
		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		/*
		 * No need for a memory barrier because of context switch.
		 * If not handed the lock, then spin again.
		 */
		if (op == RW_READER || (rw->rw_owner & RW_THREAD) == curthread)
			break;

		owner = rw->rw_owner;
	}
	KPREEMPT_ENABLE(curlwp);

	LOCKSTAT_EVENT(lsflag, rw, LB_RWLOCK |
	    (op == RW_WRITER ? LB_SLEEP1 : LB_SLEEP2), slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, rw, LB_RWLOCK | LB_SPIN, spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	RW_DASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
	    (op == RW_READER && RW_COUNT(rw) != 0));
	RW_LOCKED(rw, op);
}

/*
 * rw_vector_exit:
 *
 *	Release a rwlock.
 */
void
rw_vector_exit(krwlock_t *rw)
{
	uintptr_t curthread, owner, decr, newown, next;
	turnstile_t *ts;
	int rcnt, wcnt;
	lwp_t *l;

	curthread = (uintptr_t)curlwp;
	RW_ASSERT(rw, curthread != 0);

	if (__predict_false(panicstr != NULL))
		return;

	/*
	 * Again, we use a trick.  Since we used an add operation to
	 * set the required lock bits, we can use a subtract to clear
	 * them, which makes the read-release and write-release paths
	 * the same.
	 */
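	/*
	 * For example (illustrative): a reader releasing subtracts
	 * RW_READ_INCR, dropping the hold count by one; a writer
	 * releasing subtracts (curthread | RW_WRITE_LOCKED), clearing
	 * the owner field and the WRITE_LOCKED bit in one step and
	 * leaving only any waiter bits behind.
	 */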
	owner = rw->rw_owner;
	if (__predict_false((owner & RW_WRITE_LOCKED) != 0)) {
		RW_UNLOCKED(rw, RW_WRITER);
		RW_ASSERT(rw, RW_OWNER(rw) == curthread);
		decr = curthread | RW_WRITE_LOCKED;
	} else {
		RW_UNLOCKED(rw, RW_READER);
		RW_ASSERT(rw, RW_COUNT(rw) != 0);
		decr = RW_READ_INCR;
	}

	/*
	 * Compute what we expect the new value of the lock to be. Only
	 * proceed to do direct handoff if there are waiters, and if the
	 * lock would become unowned.
	 */
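	/*
	 * E.g. (illustrative): the last reader leaving with a writer
	 * queued sees owner == RW_READ_INCR | RW_HAS_WAITERS |
	 * RW_WRITE_WANTED.  Then newown has RW_THREAD clear but
	 * RW_HAS_WAITERS set, so we break out below to do the direct
	 * handoff rather than storing newown.
	 */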
	membar_exit();
	for (;;) {
		newown = (owner - decr);
		if ((newown & (RW_THREAD | RW_HAS_WAITERS)) == RW_HAS_WAITERS)
			break;
		next = rw_cas(rw, owner, newown);
		if (__predict_true(next == owner))
			return;
		owner = next;
	}

	/*
	 * Grab the turnstile chain lock.  This gets the interlock
	 * on the sleep queue.  Once we have that, we can adjust the
	 * waiter bits.
	 */
	ts = turnstile_lookup(rw);
	owner = rw->rw_owner;
	RW_DASSERT(rw, ts != NULL);
	RW_DASSERT(rw, (owner & RW_HAS_WAITERS) != 0);

	wcnt = TS_WAITERS(ts, TS_WRITER_Q);
	rcnt = TS_WAITERS(ts, TS_READER_Q);

	/*
	 * Give the lock away.
	 *
	 * If we are releasing a write lock, then prefer to wake all
	 * outstanding readers.  Otherwise, wake one writer if there
	 * are outstanding readers, or all writers if there are no
	 * pending readers.  If waking one specific writer, the writer
	 * is handed the lock here.  If waking multiple writers, we
	 * set WRITE_WANTED to block out new readers, and let them
	 * do the work of acquiring the lock in rw_vector_enter().
	 */
	if (rcnt == 0 || decr == RW_READ_INCR) {
		RW_DASSERT(rw, wcnt != 0);
		RW_DASSERT(rw, (owner & RW_WRITE_WANTED) != 0);

		if (rcnt != 0) {
			/* Give the lock to the longest waiting writer. */
			l = TS_FIRST(ts, TS_WRITER_Q);
			newown = (uintptr_t)l | RW_WRITE_LOCKED | RW_HAS_WAITERS;
			if (wcnt > 1)
				newown |= RW_WRITE_WANTED;
			rw_swap(rw, owner, newown);
			turnstile_wakeup(ts, TS_WRITER_Q, 1, l);
		} else {
			/* Wake all writers and let them fight it out. */
			rw_swap(rw, owner, RW_WRITE_WANTED);
			turnstile_wakeup(ts, TS_WRITER_Q, wcnt, NULL);
		}
	} else {
		RW_DASSERT(rw, rcnt != 0);

		/*
		 * Give the lock to all blocked readers.  If there
		 * is a writer waiting, new readers that arrive
		 * after the release will be blocked out.
		 */
		newown = rcnt << RW_READ_COUNT_SHIFT;
		if (wcnt != 0)
			newown |= RW_HAS_WAITERS | RW_WRITE_WANTED;

		/* Wake up all sleeping readers. */
		rw_swap(rw, owner, newown);
		turnstile_wakeup(ts, TS_READER_Q, rcnt, NULL);
	}
}

/*
 * rw_vector_tryenter:
 *
 *	Try to acquire a rwlock.
 */
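/*
 * Illustrative pattern ('lock' is a placeholder): take the lock only
 * if it can be had without sleeping, otherwise do something else:
 *
 *	if (rw_tryenter(&lock, RW_WRITER)) {
 *		...
 *		rw_exit(&lock);
 *	}
 */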
int
rw_vector_tryenter(krwlock_t *rw, const krw_t op)
{
	uintptr_t curthread, owner, incr, need_wait, next;

	curthread = (uintptr_t)curlwp;

	RW_ASSERT(rw, curthread != 0);

	if (op == RW_READER) {
		incr = RW_READ_INCR;
		need_wait = RW_WRITE_LOCKED | RW_WRITE_WANTED;
	} else {
		RW_DASSERT(rw, op == RW_WRITER);
		incr = curthread | RW_WRITE_LOCKED;
		need_wait = RW_WRITE_LOCKED | RW_THREAD;
	}

	for (owner = rw->rw_owner;; owner = next) {
		if (__predict_false((owner & need_wait) != 0))
			return 0;
		next = rw_cas(rw, owner, owner + incr);
		if (__predict_true(next == owner)) {
			/* Got it! */
			membar_enter();
			break;
		}
	}

	RW_WANTLOCK(rw, op);
	RW_LOCKED(rw, op);
	RW_DASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
	    (op == RW_READER && RW_COUNT(rw) != 0));

	return 1;
}

/*
 * rw_downgrade:
 *
 *	Downgrade a write lock to a read lock.
 */
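/*
 * Illustrative pattern ('lock' is a placeholder): update under the
 * write lock, then downgrade so other readers can proceed while we
 * keep read access:
 *
 *	rw_enter(&lock, RW_WRITER);
 *	... modify ...
 *	rw_downgrade(&lock);
 *	... read ...
 *	rw_exit(&lock);
 */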
void
rw_downgrade(krwlock_t *rw)
{
	uintptr_t owner, curthread, newown, next;
	turnstile_t *ts;
	int rcnt, wcnt;

	curthread = (uintptr_t)curlwp;
	RW_ASSERT(rw, curthread != 0);
	RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) != 0);
	RW_ASSERT(rw, RW_OWNER(rw) == curthread);
	RW_UNLOCKED(rw, RW_WRITER);
#if !defined(DIAGNOSTIC)
	__USE(curthread);
#endif

	membar_producer();
	owner = rw->rw_owner;
	if ((owner & RW_HAS_WAITERS) == 0) {
		/*
		 * There are no waiters, so we can do this the easy way.
		 * Try swapping us down to one read hold.  If it fails,
		 * the lock condition has changed and we most likely now
		 * have waiters.
		 */
		next = rw_cas(rw, owner, RW_READ_INCR);
		if (__predict_true(next == owner)) {
			RW_LOCKED(rw, RW_READER);
			RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) == 0);
			RW_DASSERT(rw, RW_COUNT(rw) != 0);
			return;
		}
		owner = next;
	}

	/*
	 * Grab the turnstile chain lock.  This gets the interlock
	 * on the sleep queue.  Once we have that, we can adjust the
	 * waiter bits.
	 */
	for (;; owner = next) {
		ts = turnstile_lookup(rw);
		RW_DASSERT(rw, ts != NULL);

		rcnt = TS_WAITERS(ts, TS_READER_Q);
		wcnt = TS_WAITERS(ts, TS_WRITER_Q);

		/*
		 * If there are no readers, just preserve the waiters
		 * bits, swap us down to one read hold and return.
		 */
		if (rcnt == 0) {
			RW_DASSERT(rw, wcnt != 0);
			RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_WANTED) != 0);
			RW_DASSERT(rw, (rw->rw_owner & RW_HAS_WAITERS) != 0);

			newown = RW_READ_INCR | RW_HAS_WAITERS | RW_WRITE_WANTED;
			next = rw_cas(rw, owner, newown);
			turnstile_exit(rw);
			if (__predict_true(next == owner))
				break;
		} else {
			/*
			 * Give the lock to all blocked readers.  We
			 * retain one read hold ourselves, since we are
			 * downgrading.  If there is a writer waiting,
			 * new readers will be blocked out.
			 */
			newown = (rcnt << RW_READ_COUNT_SHIFT) + RW_READ_INCR;
			if (wcnt != 0)
				newown |= RW_HAS_WAITERS | RW_WRITE_WANTED;

			next = rw_cas(rw, owner, newown);
			if (__predict_true(next == owner)) {
				/* Wake up all sleeping readers. */
				turnstile_wakeup(ts, TS_READER_Q, rcnt, NULL);
				break;
			}
			turnstile_exit(rw);
		}
	}

	RW_WANTLOCK(rw, RW_READER);
	RW_LOCKED(rw, RW_READER);
	RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) == 0);
	RW_DASSERT(rw, RW_COUNT(rw) != 0);
}

/*
 * rw_tryupgrade:
 *
 *	Try to upgrade a read lock to a write lock.  We must be the
 *	only reader.
 */
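/*
 * Illustrative pattern ('lock' and 'need_write' are placeholders):
 * the upgrade fails if we are not the sole reader, so be prepared to
 * drop the read hold and take the write lock from scratch:
 *
 *	rw_enter(&lock, RW_READER);
 *	if (need_write && !rw_tryupgrade(&lock)) {
 *		rw_exit(&lock);
 *		rw_enter(&lock, RW_WRITER);
 *	}
 */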
int
rw_tryupgrade(krwlock_t *rw)
{
	uintptr_t owner, curthread, newown, next;

	curthread = (uintptr_t)curlwp;
	RW_ASSERT(rw, curthread != 0);
	RW_ASSERT(rw, rw_read_held(rw));

	for (owner = rw->rw_owner;; owner = next) {
		RW_ASSERT(rw, (owner & RW_WRITE_LOCKED) == 0);
		if (__predict_false((owner & RW_THREAD) != RW_READ_INCR)) {
			RW_ASSERT(rw, (owner & RW_THREAD) != 0);
			return 0;
		}
		newown = curthread | RW_WRITE_LOCKED | (owner & ~RW_THREAD);
		next = rw_cas(rw, owner, newown);
		if (__predict_true(next == owner)) {
			membar_producer();
			break;
		}
	}

	RW_UNLOCKED(rw, RW_READER);
	RW_WANTLOCK(rw, RW_WRITER);
	RW_LOCKED(rw, RW_WRITER);
	RW_DASSERT(rw, rw->rw_owner & RW_WRITE_LOCKED);
	RW_DASSERT(rw, RW_OWNER(rw) == curthread);

	return 1;
}

/*
 * rw_read_held:
 *
 *	Returns true if the rwlock is held for reading.  Must only be
 *	used for diagnostic assertions, and never be used to make
 *	decisions about how to use a rwlock.
 */
int
rw_read_held(krwlock_t *rw)
{
	uintptr_t owner;

	if (panicstr != NULL)
		return 1;
	if (rw == NULL)
		return 0;
	owner = rw->rw_owner;
	return (owner & RW_WRITE_LOCKED) == 0 && (owner & RW_THREAD) != 0;
}

/*
 * rw_write_held:
 *
 *	Returns true if the rwlock is held for writing.  Must only be
 *	used for diagnostic assertions, and never be used to make
 *	decisions about how to use a rwlock.
 */
int
rw_write_held(krwlock_t *rw)
{

	if (panicstr != NULL)
		return 1;
	if (rw == NULL)
		return 0;
	return (rw->rw_owner & (RW_WRITE_LOCKED | RW_THREAD)) ==
	    (RW_WRITE_LOCKED | (uintptr_t)curlwp);
}

/*
 * rw_lock_held:
 *
 *	Returns true if the rwlock is held for reading or writing.  Must
 *	only be used for diagnostic assertions, and never be used to make
 *	decisions about how to use a rwlock.
 */
int
rw_lock_held(krwlock_t *rw)
{

	if (panicstr != NULL)
		return 1;
	if (rw == NULL)
		return 0;
	return (rw->rw_owner & RW_THREAD) != 0;
}

/*
 * rw_owner:
 *
 *	Return the current owner of an RW lock, but only if it is write
 *	held.  Used for priority inheritance.
 */
static lwp_t *
rw_owner(wchan_t obj)
{
	krwlock_t *rw = (void *)(uintptr_t)obj; /* discard qualifiers */
	uintptr_t owner = rw->rw_owner;

	if ((owner & RW_WRITE_LOCKED) == 0)
		return NULL;

	return (void *)(owner & RW_THREAD);
}