/*	$NetBSD: locks.c,v 1.23 2008/12/18 00:24:12 pooka Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2007, 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.23 2008/12/18 00:24:12 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/atomic.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * We map locks to pthread routines.  The difference between the kernel
 * and rumpuser routines is that while the kernel uses static storage,
 * rumpuser allocates the lock object from the heap.  This indirection
 * is necessary because we don't know the size of pthread objects here.
 * It is also beneficial, since it keeps us compatible with the kernel
 * ABI: regardless of machine architecture, all kernel lock objects are
 * at least the size of a pointer.  The downside, of course, is a
 * performance penalty.
 */
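
/*
 * A rough sketch of the indirection (illustration only, not compiled):
 * mutex_init() stores the pointer handed back by the rumpuser hypercall
 * in the first sizeof(void *) bytes of the caller's kmutex_t, and the
 * RUMPMTX() macro below simply reads it back:
 *
 *	kmutex_t km;
 *	mutex_init(&km, MUTEX_DEFAULT, IPL_NONE);
 *		=> *(struct rumpuser_mtx **)&km = <heap-allocated mutex>
 *	mutex_enter(&km);
 *		=> rumpuser_mutex_enter(RUMPMTX(&km))
 */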

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))

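/*
 * The type and ipl arguments are accepted only for interface
 * compatibility; they are ignored, and every kernel mutex is backed
 * by a heap-allocated rumpuser (pthread) mutex.
 */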
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	rumpuser_mutex_init((struct rumpuser_mtx **)mtx);
}

void
mutex_destroy(kmutex_t *mtx)
{

	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	rumpuser_mutex_enter(RUMPMTX(mtx));
}

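/*
 * Spin mutexes get no special treatment: they take the same path as
 * adaptive mutexes.  The one exception is RUMP_LMUTEX_MAGIC, a
 * placeholder value for a lock with no real backing object; entering
 * and exiting such a "mutex" is a no-op.
 */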
void
mutex_spin_enter(kmutex_t *mtx)
{

	if (__predict_true(mtx != RUMP_LMUTEX_MAGIC))
		mutex_enter(mtx);
}

int
mutex_tryenter(kmutex_t *mtx)
{

	return rumpuser_mutex_tryenter(RUMPMTX(mtx));
}

void
mutex_exit(kmutex_t *mtx)
{

	rumpuser_mutex_exit(RUMPMTX(mtx));
}

void
mutex_spin_exit(kmutex_t *mtx)
{

	if (__predict_true(mtx != RUMP_LMUTEX_MAGIC))
		mutex_exit(mtx);
}

int
mutex_owned(kmutex_t *mtx)
{

	return rumpuser_mutex_held(RUMPMTX(mtx));
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
}

void
rw_destroy(krwlock_t *rw)
{

	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{

	return rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
}

void
rw_exit(krwlock_t *rw)
{

	rumpuser_rw_exit(RUMPRW(rw));
}

/* always fails */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}

int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}

/* curricula vitae (condition variables) */

/* forgive me for I have sinned */
#define RUMPCV(a) ((struct rumpuser_cv *)(__UNCONST((a)->cv_wmesg)))
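
/*
 * The rumpuser_cv pointer is stashed in the kcondvar's cv_wmesg field,
 * hence the __UNCONST above.  As a consequence cv_init() ignores its
 * msg argument, and cv_wmesg never points to an actual wait message
 * string in a rump kernel.
 */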

void
cv_init(kcondvar_t *cv, const char *msg)
{

	rumpuser_cv_init((struct rumpuser_cv **)__UNCONST(&cv->cv_wmesg));
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
}

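/*
 * The interruptible variant cannot be interrupted here: it waits just
 * like cv_wait() and always returns 0, i.e. "no signal received".
 */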
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	return 0;
}

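/*
 * ticks == 0 means "sleep without a timeout".  Otherwise the tick
 * count is handed to the hypercall as-is; the KASSERT below records
 * the assumption that one tick is 10 ms, i.e. hz == 100.
 */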
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
#ifdef DIAGNOSTIC
	extern int hz;
#endif

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		KASSERT(hz == 100);
		return rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx), ticks);
	}
}

int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

	return cv_timedwait(cv, mtx, ticks);
}

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/*
 * giant lock
 */

static volatile int lockcnt;
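
/*
 * The giant lock may be taken recursively by the same thread;
 * rump_giantlock (set up elsewhere) must therefore allow recursive
 * entry, and lockcnt tracks the current depth.  _kernel_unlock()
 * releases nlocks levels: nlocks == 0 means "release everything",
 * nlocks == -1 means "release the single level currently held", and
 * *countp (if non-NULL) receives the depth held on entry so the
 * caller can re-acquire it later.
 */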
void
_kernel_lock(int nlocks)
{

	while (nlocks--) {
		rumpuser_mutex_enter(rump_giantlock);
		lockcnt++;
	}
}

void
_kernel_unlock(int nlocks, int *countp)
{

	if (!rumpuser_mutex_held(rump_giantlock)) {
		KASSERT(nlocks == 0);
		if (countp)
			*countp = 0;
		return;
	}

	if (countp)
		*countp = lockcnt;
	if (nlocks == 0)
		nlocks = lockcnt;
	if (nlocks == -1) {
		KASSERT(lockcnt == 1);
		nlocks = 1;
	}
	KASSERT(nlocks <= lockcnt);
	while (nlocks--) {
		lockcnt--;
		rumpuser_mutex_exit(rump_giantlock);
	}
}

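/*
 * Reference-counted mutex objects: mutex_obj_alloc() returns a kmutex_t
 * embedded at the start of a heap-allocated wrapper, mutex_obj_hold()
 * takes an extra reference, and mutex_obj_free() destroys the mutex and
 * frees the wrapper when the last reference is dropped (returning true
 * in that case, false otherwise).
 */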
struct kmutexobj {
	kmutex_t	mo_lock;
	u_int		mo_refcnt;
};

kmutex_t *
mutex_obj_alloc(kmutex_type_t type, int ipl)
{
	struct kmutexobj *mo;

	mo = kmem_alloc(sizeof(*mo), KM_SLEEP);
	mutex_init(&mo->mo_lock, type, ipl);
	mo->mo_refcnt = 1;

	return (kmutex_t *)mo;
}

void
mutex_obj_hold(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	atomic_inc_uint(&mo->mo_refcnt);
}

bool
mutex_obj_free(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
		return false;
	}
	mutex_destroy(&mo->mo_lock);
	kmem_free(mo, sizeof(*mo));
	return true;
}