/*	$NetBSD: locks.c,v 1.80.4.1 2020/04/08 14:09:01 martin Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.80.4.1 2020/04/08 14:09:01 martin Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif
/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_spin_lockops = {
	.lo_name = "mutex",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = NULL,
};
static lockops_t mutex_adaptive_lockops = {
	.lo_name = "mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};
static lockops_t rw_lockops = {
	.lo_name = "rwlock",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};

#define ALLOCK(lock, ops, return_address)		\
	lockdebug_alloc(__func__, __LINE__, lock, ops,	\
	    return_address)
#define FREELOCK(lock)					\
	lockdebug_free(__func__, __LINE__, lock)
#define WANTLOCK(lock, shar)				\
	lockdebug_wantlock(__func__, __LINE__, lock,	\
	    (uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)				\
	lockdebug_locked(__func__, __LINE__, lock, NULL,\
	    (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)				\
	lockdebug_unlocked(__func__, __LINE__, lock,	\
	    (uintptr_t)__builtin_return_address(0), shar)
#define BARRIER(lock, slp)				\
	lockdebug_barrier(__func__, __LINE__, lock, slp)
#else
#define ALLOCK(a, b, c)	do {} while (0)
#define FREELOCK(a)	do {} while (0)
#define WANTLOCK(a, b)	do {} while (0)
#define LOCKED(a, b)	do {} while (0)
#define UNLOCKED(a, b)	do {} while (0)
#define BARRIER(a, b)	do {} while (0)
#endif
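
/*
 * Note on the "shar" argument threaded through the debug macros
 * above: by the lockdebug convention used in this file, a true
 * value marks a shared (reader) acquisition and false an exclusive
 * one; mutex_ownable() below passes -1 for its pure ownability
 * probe.
 */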

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the lock object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It also buys us easy compatibility with
 * the kernel ABI, since on every machine architecture the kernel
 * lock objects are at least the size of a pointer.  The downside,
 * of course, is a performance penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx *const*)(mtx))
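
/*
 * Illustrative sketch of the indirection (not part of the build;
 * the local names "km" and "rumtx" are hypothetical):
 *
 *	kmutex_t km;			// kernel-ABI storage slot
 *	struct rumpuser_mtx *rumtx;	// heap object in the hypercall layer
 *	rumpuser_mutex_init(&rumtx, RUMPUSER_MTX_KMUTEX);
 *	memcpy(&km, &rumtx, sizeof(rumtx));
 *	// RUMPMTX(&km) now yields rumtx again
 */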

void _mutex_init(kmutex_t *, kmutex_type_t, int, uintptr_t);
void
_mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl, uintptr_t return_address)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Figure out whether the caller wanted a spin mutex with
	 * this simple set of conditionals.  The difference between
	 * a spin mutex and an adaptive mutex for a rump kernel is
	 * that the hypervisor does not relinquish the rump kernel
	 * CPU context for a spin mutex.  The hypervisor itself may
	 * block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	if (isspin)
		ALLOCK(mtx, &mutex_spin_lockops, return_address);
	else
		ALLOCK(mtx, &mutex_adaptive_lockops, return_address);
}
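
/*
 * A hedged example of how the classification above plays out (the
 * IPL names are standard NetBSD ones; the calls themselves are
 * hypothetical):
 *
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_NONE);	// adaptive
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_SOFTNET);	// adaptive
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_VM);	// spin (high ipl)
 *	mutex_init(&mtx, MUTEX_SPIN, IPL_HIGH);		// spin (explicit)
 *
 * Here "spin" only means the rump kernel CPU context is not given
 * up; the hosting thread may still block in the hypervisor.
 */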

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	_mutex_init(mtx, type, ipl, (uintptr_t)__builtin_return_address(0));
}

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	if (!rumpuser_mutex_spin_p(RUMPMTX(mtx)))
		BARRIER(mtx, 1);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	KASSERT(rumpuser_mutex_spin_p(RUMPMTX(mtx)));
	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	int error;

	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (error == 0) {
		WANTLOCK(mtx, 0);
		LOCKED(mtx, false);
	}
	return error == 0;
}

void
mutex_exit(kmutex_t *mtx)
{

#ifndef LOCKDEBUG
	KASSERT(mutex_owned(mtx));
#endif
	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_ownable(const kmutex_t *mtx)
{

#ifdef LOCKDEBUG
	WANTLOCK(mtx, -1);
#endif
	return 1;
}

int
mutex_owned(const kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

lwp_t *
mutex_owner(const kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l;
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

static enum rumprwlock
krw2rumprw(const krw_t op)
{

	switch (op) {
	case RW_READER:
		return RUMPUSER_RW_READER;
	case RW_WRITER:
		return RUMPUSER_RW_WRITER;
	default:
		panic("unknown rwlock type");
	}
}

void _rw_init(krwlock_t *, uintptr_t);
void
_rw_init(krwlock_t *rw, uintptr_t return_address)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops, return_address);
}

void
rw_init(krwlock_t *rw)
{

	_rw_init(rw, (uintptr_t)__builtin_return_address(0));
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER);
	BARRIER(rw, 1);
	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int error;

	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
	if (error == 0) {
		WANTLOCK(rw, op == RW_READER);
		LOCKED(rw, op == RW_READER);
	}
	return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

int
rw_tryupgrade(krwlock_t *rw)
{
	int rv;

	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
	if (rv == 0) {
		UNLOCKED(rw, 1);
		WANTLOCK(rw, 0);
		LOCKED(rw, 0);
	}
	return rv == 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}
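
/*
 * Note on the lockdebug choreography in the two functions above: an
 * upgrade is recorded as "release shared, acquire exclusive" and a
 * downgrade as the reverse, since the lockdebug interface used here
 * has no dedicated upgrade/downgrade events.
 */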

int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_lock_held(krwlock_t *rw)
{

	return rw_read_held(rw) || rw_write_held(rw);
}

krw_t
rw_lock_op(krwlock_t *rw)
{

	return rw_write_held(rw) ? RW_WRITER : RW_READER;
}

/* curricula vitae, i.e. condition variables */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * Yield here: another thread may need the CPU to
		 * set the condition we would be waiting for.
		 * Otherwise we'd just loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If it is set, we need to wait here
	 * until we are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and drop the "reference" to l->l_private */

		mutex_enter(mtx);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}
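
/*
 * For reference, the canonical usage pattern (a sketch, not code
 * from this file; "lk", "cond" and "condition" are hypothetical):
 * the mutex is held across the wait and the condition re-checked,
 * since wakeups can be spurious or stolen.
 *
 *	mutex_enter(&lk);
 *	while (!condition)
 *		cv_wait(&cond, &lk);
 *	// condition holds here, lk still held
 *	mutex_exit(&lk);
 */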

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait_sig without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);
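
/*
 * Worked example of the tick conversion above, assuming the common
 * hz = 100: ticks = 150 gives tv_sec = 150 / 100 = 1 and
 * tv_nsec = (150 % 100) * (1000000000 / 100) = 500000000, i.e. a
 * timeout of 1.5 seconds.  A ticks value of 0 means "no timeout"
 * and falls through to the untimed wait.
 */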

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}