Home | History | Annotate | Line # | Download | only in rumpkern
locks.c revision 1.69.4.3
      1 /*	$NetBSD: locks.c,v 1.69.4.3 2017/02/05 13:41:00 skrll Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     16  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     18  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     25  * SUCH DAMAGE.
     26  */
     27 
     28 #include <sys/cdefs.h>
     29 __KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.69.4.3 2017/02/05 13:41:00 skrll Exp $");
     30 
     31 #include <sys/param.h>
     32 #include <sys/kmem.h>
     33 #include <sys/mutex.h>
     34 #include <sys/rwlock.h>
     35 
     36 #include <rump-sys/kern.h>
     37 
     38 #include <rump/rumpuser.h>
     39 
/* Nonzero iff this rump kernel was compiled with LOCKDEBUG. */
#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif
     45 
/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

/* Lock operation descriptors handed to lockdebug_alloc(). */
static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

/*
 * Thin wrappers around the lockdebug hooks.  Each passes the
 * caller's return address for diagnostics; the "shar" argument
 * flags a shared (reader) acquisition.  Without LOCKDEBUG they
 * all expand to nothing.
 */
#define ALLOCK(lock, ops)				\
    lockdebug_alloc(__func__, __LINE__, lock, ops,	\
    (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)			\
    lockdebug_free(__func__, __LINE__, lock)
#define WANTLOCK(lock, shar)				\
    lockdebug_wantlock(__func__, __LINE__, lock,	\
    (uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)				\
    lockdebug_locked(__func__, __LINE__, lock, NULL,	\
    (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)		\
    lockdebug_unlocked(__func__, __LINE__, lock,	\
    (uintptr_t)__builtin_return_address(0), shar)
#define BARRIER(lock, slp)		\
    lockdebug_barrier(__func__, __LINE__, lock, slp)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#define BARRIER(a, b)
#endif
     88 
     89 /*
     90  * We map locks to pthread routines.  The difference between kernel
     91  * and rumpuser routines is that while the kernel uses static
     92  * storage, rumpuser allocates the object from the heap.  This
     93  * indirection is necessary because we don't know the size of
     94  * pthread objects here.  It is also beneficial, since we can
     95  * be easily compatible with the kernel ABI because all kernel
     96  * objects regardless of machine architecture are always at least
     97  * the size of a pointer.  The downside, of course, is a performance
     98  * penalty.
     99  */
    100 
/* A kmutex_t stores a single pointer to the heap-allocated rumpuser mutex. */
#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
    102 
    103 void
    104 mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
    105 {
    106 	int ruflags = RUMPUSER_MTX_KMUTEX;
    107 	int isspin;
    108 
    109 	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));
    110 
    111 	/*
    112 	 * Try to figure out if the caller wanted a spin mutex or
    113 	 * not with this easy set of conditionals.  The difference
    114 	 * between a spin mutex and an adaptive mutex for a rump
    115 	 * kernel is that the hypervisor does not relinquish the
    116 	 * rump kernel CPU context for a spin mutex.  The
    117 	 * hypervisor itself may block even when "spinning".
    118 	 */
    119 	if (type == MUTEX_SPIN) {
    120 		isspin = 1;
    121 	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
    122 	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
    123 	    ipl == IPL_SOFTSERIAL) {
    124 		isspin = 0;
    125 	} else {
    126 		isspin = 1;
    127 	}
    128 
    129 	if (isspin)
    130 		ruflags |= RUMPUSER_MTX_SPIN;
    131 	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
    132 	ALLOCK(mtx, &mutex_lockops);
    133 }
    134 
/*
 * mutex_destroy: free the hypervisor mutex backing mtx.  The
 * lockdebug record is released first, while the lock object is
 * still valid.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}
    142 
/*
 * mutex_enter: acquire mtx, blocking in the hypervisor until it
 * is available.  Under LOCKDEBUG, intent is recorded (WANTLOCK)
 * and a sleep barrier check done (BARRIER) before blocking, and
 * ownership recorded (LOCKED) only once the lock is held.
 */
void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	BARRIER(mtx, 1);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}
    152 
/*
 * mutex_spin_enter: like mutex_enter(), but uses the _nowrap
 * hypervisor entry point, so the rump kernel CPU context is not
 * relinquished while waiting (cf. comment in mutex_init()).
 */
void
mutex_spin_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	BARRIER(mtx, 1);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}
    162 
    163 int
    164 mutex_tryenter(kmutex_t *mtx)
    165 {
    166 	int error;
    167 
    168 	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
    169 	if (error == 0) {
    170 		WANTLOCK(mtx, 0);
    171 		LOCKED(mtx, false);
    172 	}
    173 	return error == 0;
    174 }
    175 
/*
 * mutex_exit: release mtx.  The lockdebug release record is
 * emitted before the hypervisor mutex is actually dropped.
 */
void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
/* Spin mutexes are plain hypervisor mutexes here, so exit is shared. */
__strong_alias(mutex_spin_exit,mutex_exit);
    184 
    185 int
    186 mutex_owned(kmutex_t *mtx)
    187 {
    188 
    189 	return mutex_owner(mtx) == curlwp;
    190 }
    191 
    192 struct lwp *
    193 mutex_owner(kmutex_t *mtx)
    194 {
    195 	struct lwp *l;
    196 
    197 	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
    198 	return l;
    199 }
    200 
/* A krwlock_t likewise stores a pointer to the rumpuser rwlock. */
#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */
    204 
    205 static enum rumprwlock
    206 krw2rumprw(const krw_t op)
    207 {
    208 
    209 	switch (op) {
    210 	case RW_READER:
    211 		return RUMPUSER_RW_READER;
    212 	case RW_WRITER:
    213 		return RUMPUSER_RW_WRITER;
    214 	default:
    215 		panic("unknown rwlock type");
    216 	}
    217 }
    218 
/*
 * rw_init: allocate the backing rumpuser rwlock and store the
 * pointer in the caller's krwlock_t (see the ABI comment above
 * RUMPMTX for why the indirection exists).
 */
void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}
    228 
/*
 * rw_destroy: free the hypervisor rwlock backing rw.  The
 * lockdebug record goes first, while the lock is still valid.
 */
void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}
    236 
/*
 * rw_enter: acquire rw for reading or writing according to op,
 * blocking until it is available.  The lockdebug "shar" argument
 * is true for RW_READER acquisitions.
 */
void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER);
	BARRIER(rw, 1);
	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
	LOCKED(rw, op == RW_READER);
}
    246 
    247 int
    248 rw_tryenter(krwlock_t *rw, const krw_t op)
    249 {
    250 	int error;
    251 
    252 	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
    253 	if (error == 0) {
    254 		WANTLOCK(rw, op == RW_READER);
    255 		LOCKED(rw, op == RW_READER);
    256 	}
    257 	return error == 0;
    258 }
    259 
/*
 * rw_exit: release rw, whichever direction it is held in.  Under
 * LOCKDEBUG the direction is probed from the lock itself so the
 * matching UNLOCKED() record can be emitted before release.
 */
void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}
    273 
    274 int
    275 rw_tryupgrade(krwlock_t *rw)
    276 {
    277 	int rv;
    278 
    279 	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
    280 	if (rv == 0) {
    281 		UNLOCKED(rw, 1);
    282 		WANTLOCK(rw, 0);
    283 		LOCKED(rw, 0);
    284 	}
    285 	return rv == 0;
    286 }
    287 
/*
 * rw_downgrade: convert a write hold on rw into a read hold.
 * The hypervisor performs the downgrade; the lockdebug records
 * are then rewritten from exclusive (0) to shared (1) -- the
 * lock itself is never actually released in between.
 */
void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}
    297 
    298 int
    299 rw_read_held(krwlock_t *rw)
    300 {
    301 	int rv;
    302 
    303 	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
    304 	return rv;
    305 }
    306 
    307 int
    308 rw_write_held(krwlock_t *rw)
    309 {
    310 	int rv;
    311 
    312 	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
    313 	return rv;
    314 }
    315 
    316 int
    317 rw_lock_held(krwlock_t *rw)
    318 {
    319 
    320 	return rw_read_held(rw) || rw_write_held(rw);
    321 }
    322 
/* curriculum vitaes */

/* A kcondvar_t stores a pointer to the rumpuser condition variable. */
#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))
    326 
/*
 * cv_init: allocate the backing rumpuser condvar and store the
 * pointer in the caller's kcondvar_t.  The wait-channel message
 * "msg" is accepted for interface compatibility but unused here.
 */
void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}
    335 
/* cv_destroy: free the hypervisor condvar backing cv. */
void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}
    342 
/*
 * docvwait: common body of the cv_*wait() variants.  Wait on cv
 * with mtx held; if ts is non-NULL, wait at most that long and
 * return EWOULDBLOCK on timeout.  Returns EINTR if the lwp is
 * being torn down (LW_RUMP_QEXIT), 0 otherwise.  mtx is held
 * again on return in all cases.
 */
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	/* Already exiting?  Don't go to sleep on the cv at all. */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here, someone might want the cpu
		 * to set a condition.  otherwise we'll just
		 * loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	/* Record the cv we sleep on in l_private; cleared again below. */
	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  if so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->private */

		mutex_enter(mtx);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}
    400 
/*
 * cv_wait: wait on cv with mtx held until signalled.  Panics if
 * the rump kernel was configured without threads, since nobody
 * could ever wake us up.
 */
void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}
    409 
/*
 * cv_wait_sig: as cv_wait(), but returns docvwait()'s status.
 * The only "interruption" possible here is lwp teardown, which
 * yields EINTR; there is no actual signal delivery involved.
 */
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	return docvwait(cv, mtx, NULL);
}
    418 
    419 int
    420 cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
    421 {
    422 	struct timespec ts;
    423 	extern int hz;
    424 	int rv;
    425 
    426 	if (ticks == 0) {
    427 		rv = cv_wait_sig(cv, mtx);
    428 	} else {
    429 		ts.tv_sec = ticks / hz;
    430 		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
    431 		rv = docvwait(cv, mtx, &ts);
    432 	}
    433 
    434 	return rv;
    435 }
    436 __strong_alias(cv_timedwait_sig,cv_timedwait);
    437 
/* cv_signal: wake one waiter on cv. */
void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}
    444 
/* cv_broadcast: wake all waiters on cv. */
void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}
    451 
    452 bool
    453 cv_has_waiters(kcondvar_t *cv)
    454 {
    455 	int rv;
    456 
    457 	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
    458 	return rv != 0;
    459 }
    460 
    461 /* this is not much of an attempt, but ... */
    462 bool
    463 cv_is_valid(kcondvar_t *cv)
    464 {
    465 
    466 	return RUMPCV(cv) != NULL;
    467 }
    468