/* locks.c revision 1.70 -- NetBSD sys/rump/librump/rumpkern */
      1 /*	$NetBSD: locks.c,v 1.70 2015/09/30 01:31:56 ozaki-r Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     16  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     18  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     25  * SUCH DAMAGE.
     26  */
     27 
     28 #include <sys/cdefs.h>
     29 __KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.70 2015/09/30 01:31:56 ozaki-r Exp $");
     30 
     31 #include <sys/param.h>
     32 #include <sys/kmem.h>
     33 #include <sys/mutex.h>
     34 #include <sys/rwlock.h>
     35 
     36 #include <rump/rumpuser.h>
     37 
     38 #include "rump_private.h"
     39 
/*
 * Exported flag recording whether this rump kernel was built with
 * LOCKDEBUG, so other components can adapt their behavior.
 */
#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif
     45 
     46 /*
     47  * Simple lockdebug.  If it's compiled in, it's always active.
     48  * Currently available only for mtx/rwlock.
     49  */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

/* lockops descriptors registered with lockdebug for each lock class */
static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

/*
 * Thin wrappers around the lockdebug hooks.  They must stay macros:
 * __builtin_return_address(0) expands in the lock routine itself, so
 * lockdebug records the address of that routine's caller.  "shar" is
 * non-zero for a shared (read) acquisition.  Without LOCKDEBUG they
 * expand to nothing.
 */
#define ALLOCK(lock, ops)		\
    lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)			\
    lockdebug_free(lock)
#define WANTLOCK(lock, shar)	\
    lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)		\
    lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)		\
    lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#endif
     81 
     82 /*
     83  * We map locks to pthread routines.  The difference between kernel
     84  * and rumpuser routines is that while the kernel uses static
     85  * storage, rumpuser allocates the object from the heap.  This
     86  * indirection is necessary because we don't know the size of
     87  * pthread objects here.  It is also beneficial, since we can
     88  * be easily compatible with the kernel ABI because all kernel
     89  * objects regardless of machine architecture are always at least
     90  * the size of a pointer.  The downside, of course, is a performance
     91  * penalty.
     92  */
     93 
/* Interpret a kmutex_t's storage as the pointer to the rumpuser mutex. */
#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
     95 
     96 void
     97 mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
     98 {
     99 	int ruflags = RUMPUSER_MTX_KMUTEX;
    100 	int isspin;
    101 
    102 	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));
    103 
    104 	/*
    105 	 * Try to figure out if the caller wanted a spin mutex or
    106 	 * not with this easy set of conditionals.  The difference
    107 	 * between a spin mutex and an adaptive mutex for a rump
    108 	 * kernel is that the hypervisor does not relinquish the
    109 	 * rump kernel CPU context for a spin mutex.  The
    110 	 * hypervisor itself may block even when "spinning".
    111 	 */
    112 	if (type == MUTEX_SPIN) {
    113 		isspin = 1;
    114 	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
    115 	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
    116 	    ipl == IPL_SOFTSERIAL) {
    117 		isspin = 0;
    118 	} else {
    119 		isspin = 1;
    120 	}
    121 
    122 	if (isspin)
    123 		ruflags |= RUMPUSER_MTX_SPIN;
    124 	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
    125 	ALLOCK(mtx, &mutex_lockops);
    126 }
    127 
/*
 * Destroy a mutex: release the lockdebug record first, then free the
 * underlying rumpuser mutex.  The mutex must not be held.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}
    135 
/*
 * Acquire mtx, blocking as needed.  For an adaptive mutex the
 * hypervisor may relinquish the rump kernel CPU context while waiting.
 * Lockdebug is told before (WANTLOCK) and after (LOCKED) acquisition.
 */
void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}
    144 
/*
 * Acquire mtx without relinquishing the rump kernel CPU context
 * (the "nowrap" hypercall variant); used for spin mutexes.
 */
void
mutex_spin_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}
    153 
    154 int
    155 mutex_tryenter(kmutex_t *mtx)
    156 {
    157 	int error;
    158 
    159 	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
    160 	if (error == 0) {
    161 		WANTLOCK(mtx, 0);
    162 		LOCKED(mtx, false);
    163 	}
    164 	return error == 0;
    165 }
    166 
/*
 * Release mtx.  mutex_spin_exit is a strong alias: spin and adaptive
 * mutexes are released identically in the rump implementation.
 */
void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);
    175 
    176 int
    177 mutex_owned(kmutex_t *mtx)
    178 {
    179 
    180 	return mutex_owner(mtx) == curlwp;
    181 }
    182 
/*
 * Return the owner lwp of mtx as reported by the hypercall layer.
 */
struct lwp *
mutex_owner(kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l;
}
    191 
/* Interpret a krwlock_t's storage as the pointer to the rumpuser rwlock. */
#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))
    193 
    194 /* reader/writer locks */
    195 
    196 static enum rumprwlock
    197 krw2rumprw(const krw_t op)
    198 {
    199 
    200 	switch (op) {
    201 	case RW_READER:
    202 		return RUMPUSER_RW_READER;
    203 	case RW_WRITER:
    204 		return RUMPUSER_RW_WRITER;
    205 	default:
    206 		panic("unknown rwlock type");
    207 	}
    208 }
    209 
/*
 * Initialize a reader/writer lock on top of a heap-allocated rumpuser
 * rwlock; the krwlock_t storage merely holds the pointer.
 */
void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}
    219 
/*
 * Destroy rw: drop the lockdebug record, then free the underlying
 * rumpuser rwlock.  The lock must not be held.
 */
void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}
    227 
    228 void
    229 rw_enter(krwlock_t *rw, const krw_t op)
    230 {
    231 
    232 
    233 	WANTLOCK(rw, op == RW_READER);
    234 	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
    235 	LOCKED(rw, op == RW_READER);
    236 }
    237 
    238 int
    239 rw_tryenter(krwlock_t *rw, const krw_t op)
    240 {
    241 	int error;
    242 
    243 	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
    244 	if (error == 0) {
    245 		WANTLOCK(rw, op == RW_READER);
    246 		LOCKED(rw, op == RW_READER);
    247 	}
    248 	return error == 0;
    249 }
    250 
/*
 * Release rw.  Under LOCKDEBUG we must determine whether the hold is
 * shared or exclusive *before* the hypercall releases it, since the
 * state cannot be queried afterwards.
 */
void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}
    264 
    265 int
    266 rw_tryupgrade(krwlock_t *rw)
    267 {
    268 	int rv;
    269 
    270 	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
    271 	if (rv == 0) {
    272 		UNLOCKED(rw, 1);
    273 		WANTLOCK(rw, 0);
    274 		LOCKED(rw, 0);
    275 	}
    276 	return rv == 0;
    277 }
    278 
/*
 * Downgrade a write hold on rw to a read hold.  Never fails; the
 * lockdebug exclusive record is swapped for a shared one after the
 * hypercall completes.
 */
void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}
    288 
/*
 * Return non-zero if rw is held for reading, as reported by the
 * hypercall layer.
 */
int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
	return rv;
}
    297 
/*
 * Return non-zero if rw is held for writing, as reported by the
 * hypercall layer.
 */
int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
	return rv;
}
    306 
    307 int
    308 rw_lock_held(krwlock_t *rw)
    309 {
    310 
    311 	return rw_read_held(rw) || rw_write_held(rw);
    312 }
    313 
    314 /* curriculum vitaes */
    315 
/* Interpret a kcondvar_t's storage as the pointer to the rumpuser cv. */
#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))
    317 
/*
 * Initialize a condition variable.  The wait message "msg" is
 * accepted for kernel API compatibility but is unused here.
 */
void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}
    326 
/*
 * Destroy cv, freeing the underlying rumpuser condition variable.
 */
void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}
    333 
/*
 * Common guts of cv_wait()/cv_timedwait(): wait on cv with mtx held,
 * optionally bounded by the relative timeout ts (NULL = wait forever).
 *
 * Returns 0 on wakeup, EWOULDBLOCK if the timeout expired, or EINTR
 * when the calling lwp is being torn down (LW_RUMP_QEXIT set).
 */
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	/* already marked for exit: don't start a wait at all */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here, someone might want the cpu
		 * to set a condition.  otherwise we'll just
		 * loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	/* record the cv we are sleeping on; cleared again below */
	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  if so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		mutex_exit(mtx); /* drop and retake later */

		/* wait on p_waitcv via the raw hypercall to avoid recursion */
		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->private */

		mutex_enter(mtx);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}
    391 
/*
 * Wait on cv with mtx held until signalled.  Requires a threaded
 * rump kernel: with rump_threads == 0 nobody could ever wake us.
 */
void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}
    400 
    401 int
    402 cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
    403 {
    404 
    405 	if (__predict_false(rump_threads == 0))
    406 		panic("cv_wait without threads");
    407 	return docvwait(cv, mtx, NULL);
    408 }
    409 
    410 int
    411 cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
    412 {
    413 	struct timespec ts;
    414 	extern int hz;
    415 	int rv;
    416 
    417 	if (ticks == 0) {
    418 		rv = cv_wait_sig(cv, mtx);
    419 	} else {
    420 		ts.tv_sec = ticks / hz;
    421 		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
    422 		rv = docvwait(cv, mtx, &ts);
    423 	}
    424 
    425 	return rv;
    426 }
    427 __strong_alias(cv_timedwait_sig,cv_timedwait);
    428 
/*
 * Wake one waiter on cv.
 */
void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}
    435 
/*
 * Wake all waiters on cv.
 */
void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}
    442 
/*
 * Return true if the hypercall layer reports waiters on cv.
 */
bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}
    451 
/* this is not much of an attempt, but ... */
/*
 * Best-effort validity check: an initialized cv stores a non-NULL
 * rumpuser cv pointer; uninitialized (zeroed) storage does not.
 */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}
    459