/*	$NetBSD: locks_up.c,v 1.6.2.2 2017/12/03 11:39:16 jdolecek Exp $	*/

/*
 * Copyright (c) 2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual uniprocessor rump kernel version of locks.  Since the entire
 * kernel is running on only one CPU in the system, there is no need
 * to perform slow cache-coherent MP locking operations.  This speeds
 * things up quite dramatically and is a good example of how two
 * disjoint kernels running simultaneously in an MP system can be
 * massively faster than a single kernel using fine-grained locking.
 */
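
/*
 * Illustrative sketch (assumes a uniprocessor rump configuration,
 * cf. checkncpu() below): callers use the standard kernel locking
 * API unchanged; only the backing implementation differs from the
 * MP locks.
 *
 *	kmutex_t mtx;
 *
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(&mtx);
 *	...critical section, executed on the single virtual CPU...
 *	mutex_exit(&mtx);
 *	mutex_destroy(&mtx);
 */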

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks_up.c,v 1.6.2.2 2017/12/03 11:39:16 jdolecek Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

/*
 * Uniprocessor mutex: the kmutex_t itself merely stores a pointer to
 * this structure, which tracks the owning lwp, the number of waiters
 * and the rumpuser condition variable used for sleeping.
 */
struct upmtx {
	struct lwp *upm_owner;
	int upm_wanted;
	struct rumpuser_cv *upm_rucv;
};
/* Fetch the struct upmtx hiding behind a kmutex_t. */
#define UPMTX(mtx) struct upmtx *upm = *(struct upmtx **)mtx

static inline void
checkncpu(void)
{

	if (__predict_false(ncpu != 1))
		panic("UP lock implementation requires RUMP_NCPU == 1");
}

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	struct upmtx *upm;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));
	checkncpu();

	/*
	 * In uniprocessor locking we don't need to differentiate
	 * between spin mutexes and adaptive ones.  We could
	 * replace mutex_enter() with a NOP for spin mutexes, but
	 * we don't bother with that for now.
	 */

	/*
	 * XXX: pool_cache would be nice, but not easily possible,
	 * as pool cache init wants to call mutex_init() ...
	 */
	upm = rump_hypermalloc(sizeof(*upm), 0, true, "mutex_init");
	memset(upm, 0, sizeof(*upm));
	rumpuser_cv_init(&upm->upm_rucv);
	memcpy(mtx, &upm, sizeof(void *));
}

void
mutex_destroy(kmutex_t *mtx)
{
	UPMTX(mtx);

	KASSERT(upm->upm_owner == NULL);
	KASSERT(upm->upm_wanted == 0);
	rumpuser_cv_destroy(upm->upm_rucv);
	rump_hyperfree(upm, sizeof(*upm));
}

void
mutex_enter(kmutex_t *mtx)
{
	UPMTX(mtx);

	/* fastpath? */
	if (mutex_tryenter(mtx))
		return;

	/*
	 * No?  bummer, do it the slow and painful way then.
	 */
	upm->upm_wanted++;
	while (!mutex_tryenter(mtx)) {
		rump_schedlock_cv_wait(upm->upm_rucv);
	}
	upm->upm_wanted--;

	KASSERT(upm->upm_wanted >= 0);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	mutex_enter(mtx);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	UPMTX(mtx);

	if (upm->upm_owner)
		return 0;

	upm->upm_owner = curlwp;
	return 1;
}

void
mutex_exit(kmutex_t *mtx)
{
	UPMTX(mtx);

	if (upm->upm_wanted) {
		rumpuser_cv_signal(upm->upm_rucv); /* CPU is our interlock */
	}
	upm->upm_owner = NULL;
}
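
/*
 * Note on the sleep/wakeup handshake above: since everything runs on a
 * single virtual CPU and nothing can preempt us between manipulating
 * upm_wanted and blocking or signalling, the CPU itself acts as the
 * interlock.  upm_wanted is therefore a plain counter needing no
 * atomic operations, and it is fine for mutex_exit() to signal before
 * clearing upm_owner: the woken waiter cannot run until we leave the
 * virtual CPU, by which time the owner field is already NULL.
 */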

void
mutex_spin_exit(kmutex_t *mtx)
{

	mutex_exit(mtx);
}

int
mutex_owned(kmutex_t *mtx)
{
	UPMTX(mtx);

	return upm->upm_owner == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{
	UPMTX(mtx);

	return upm->upm_owner;
}

/*
 * Uniprocessor rwlock: as with mutexes, the krwlock_t stores only a
 * pointer to this structure.  The lock is write-held when uprw_owner
 * is set and read-held when uprw_readers is non-zero; the want
 * counters track sleeping readers and writers, respectively.
 */
struct uprw {
	struct lwp *uprw_owner;
	int uprw_readers;
	uint16_t uprw_rwant;
	uint16_t uprw_wwant;
	struct rumpuser_cv *uprw_rucv_reader;
	struct rumpuser_cv *uprw_rucv_writer;
};

/* Fetch the struct uprw hiding behind a krwlock_t. */
#define UPRW(rw) struct uprw *uprw = *(struct uprw **)rw

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{
	struct uprw *uprw;

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));
	checkncpu();

	uprw = rump_hypermalloc(sizeof(*uprw), 0, true, "rwinit");
	memset(uprw, 0, sizeof(*uprw));
	rumpuser_cv_init(&uprw->uprw_rucv_reader);
	rumpuser_cv_init(&uprw->uprw_rucv_writer);
	memcpy(rw, &uprw, sizeof(void *));
}

void
rw_destroy(krwlock_t *rw)
{
	UPRW(rw);

	rumpuser_cv_destroy(uprw->uprw_rucv_reader);
	rumpuser_cv_destroy(uprw->uprw_rucv_writer);
	rump_hyperfree(uprw, sizeof(*uprw));
}

/* take rwlock.  prefer writers over readers (see rw_tryenter and rw_exit) */
void
rw_enter(krwlock_t *rw, const krw_t op)
{
	UPRW(rw);
	struct rumpuser_cv *rucv;
	uint16_t *wp;

	if (rw_tryenter(rw, op))
		return;

	/* lagpath */
	if (op == RW_READER) {
		rucv = uprw->uprw_rucv_reader;
		wp = &uprw->uprw_rwant;
	} else {
		rucv = uprw->uprw_rucv_writer;
		wp = &uprw->uprw_wwant;
	}

	(*wp)++;
	while (!rw_tryenter(rw, op)) {
		rump_schedlock_cv_wait(rucv);
	}
	(*wp)--;
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	UPRW(rw);

	switch (op) {
	case RW_READER:
		if (uprw->uprw_owner == NULL && uprw->uprw_wwant == 0) {
			uprw->uprw_readers++;
			return 1;
		}
		break;
	case RW_WRITER:
		if (uprw->uprw_owner == NULL && uprw->uprw_readers == 0) {
			uprw->uprw_owner = curlwp;
			return 1;
		}
		break;
	}

	return 0;
}

void
rw_exit(krwlock_t *rw)
{
	UPRW(rw);

	if (uprw->uprw_readers > 0) {
		uprw->uprw_readers--;
	} else {
		KASSERT(uprw->uprw_owner == curlwp);
		uprw->uprw_owner = NULL;
	}

	if (uprw->uprw_wwant) {
		rumpuser_cv_signal(uprw->uprw_rucv_writer);
	} else if (uprw->uprw_rwant) {
		rumpuser_cv_signal(uprw->uprw_rucv_reader);
	}
}
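
/*
 * Illustrative scenario for the writer preference implemented above:
 * with one lwp holding the lock as a reader, a second sleeping in
 * rw_enter(RW_WRITER) and a third in rw_enter(RW_READER), the waiting
 * reader cannot slip in because uprw_wwant != 0 makes its
 * rw_tryenter() fail.  When the holder calls rw_exit(), the writer cv
 * is signalled first; readers are only woken once no writer is
 * waiting.
 */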

int
rw_tryupgrade(krwlock_t *rw)
{
	UPRW(rw);

	if (uprw->uprw_readers == 1 && uprw->uprw_owner == NULL) {
		uprw->uprw_readers = 0;
		uprw->uprw_owner = curlwp;
		return 1;
	} else {
		return 0;
	}
}

int
rw_write_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_owner == curlwp;
}

int
rw_read_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_readers > 0;
}

int
rw_lock_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_owner || uprw->uprw_readers;
}


/*
 * Condvars are almost the same as in the MP case except that we
 * use the scheduler mutex as the pthread interlock instead of the
 * mutex associated with the condvar.
 */
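
/*
 * Illustrative sketch of the expected usage pattern (mtx, cv and
 * "condition" are placeholder names): as with the MP implementation,
 * the caller holds the kmutex across the predicate check and cv_wait()
 * reacquires it before returning.
 *
 *	mutex_enter(&mtx);
 *	while (!condition)
 *		cv_wait(&cv, &mtx);
 *	...use the now-true condition...
 *	mutex_exit(&mtx);
 */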

/* A kcondvar_t directly stores the pointer to the rumpuser cv. */
#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));
	checkncpu();

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{
#ifdef DIAGNOSTIC
	UPMTX(mtx);
	KASSERT(upm->upm_owner == curlwp);

	if (rump_threads == 0)
		panic("cv_wait without threads");
#endif

	/*
	 * NOTE: we must atomically release the *CPU* here, i.e.
	 * nothing between mutex_exit and entering rumpuser condwait
	 * may preempt us from the virtual CPU.
	 */
	mutex_exit(mtx);
	rump_schedlock_cv_wait(RUMPCV(cv));
	mutex_enter(mtx);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	cv_wait(cv, mtx);
	return 0;
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;

#ifdef DIAGNOSTIC
	UPMTX(mtx);
	KASSERT(upm->upm_owner == curlwp);
#endif

	ts.tv_sec = ticks / hz;
	ts.tv_nsec = (ticks % hz) * (1000000000/hz);
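	/*
	 * Worked example of the conversion above: with hz == 100 and
	 * ticks == 150, ts becomes { .tv_sec = 1, .tv_nsec = 500000000 },
	 * i.e. 1.5 seconds.
	 */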

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		int rv;
		mutex_exit(mtx);
		rv = rump_schedlock_cv_timedwait(RUMPCV(cv), &ts);
		mutex_enter(mtx);
		if (rv)
			return EWOULDBLOCK;
		else
			return 0;
	}
}

int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

	return cv_timedwait(cv, mtx, ticks);
}

void
cv_signal(kcondvar_t *cv)
{

	/* CPU == interlock */
	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	/* CPU == interlock */
	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int n;

	rumpuser_cv_has_waiters(RUMPCV(cv), &n);

	return n > 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}