Home | History | Annotate | Line # | Download | only in rumpkern
locks_up.c revision 1.10.18.1
      1  1.10.18.1    martin /*	$NetBSD: locks_up.c,v 1.10.18.1 2020/04/08 14:09:01 martin Exp $	*/
      2        1.1     pooka 
      3        1.1     pooka /*
      4        1.1     pooka  * Copyright (c) 2010 Antti Kantee.  All Rights Reserved.
      5        1.1     pooka  *
      6        1.1     pooka  * Redistribution and use in source and binary forms, with or without
      7        1.1     pooka  * modification, are permitted provided that the following conditions
      8        1.1     pooka  * are met:
      9        1.1     pooka  * 1. Redistributions of source code must retain the above copyright
     10        1.1     pooka  *    notice, this list of conditions and the following disclaimer.
     11        1.1     pooka  * 2. Redistributions in binary form must reproduce the above copyright
     12        1.1     pooka  *    notice, this list of conditions and the following disclaimer in the
     13        1.1     pooka  *    documentation and/or other materials provided with the distribution.
     14        1.1     pooka  *
     15        1.1     pooka  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     16        1.1     pooka  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     17        1.1     pooka  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     18        1.1     pooka  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     19        1.1     pooka  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     20        1.1     pooka  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     21        1.1     pooka  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     22        1.1     pooka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     23        1.1     pooka  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     24        1.1     pooka  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     25        1.1     pooka  * SUCH DAMAGE.
     26        1.1     pooka  */
     27        1.1     pooka 
     28        1.1     pooka /*
     29        1.1     pooka  * Virtual uniprocessor rump kernel version of locks.  Since the entire
     30        1.1     pooka  * kernel is running on only one CPU in the system, there is no need
     31        1.1     pooka  * to perform slow cache-coherent MP locking operations.  This speeds
      32        1.1     pooka  * up things quite dramatically and is a good example of how two
     33        1.1     pooka  * disjoint kernels running simultaneously in an MP system can be
     34        1.1     pooka  * massively faster than one with fine-grained locking.
     35        1.1     pooka  */
     36        1.1     pooka 
     37        1.1     pooka #include <sys/cdefs.h>
     38  1.10.18.1    martin __KERNEL_RCSID(0, "$NetBSD: locks_up.c,v 1.10.18.1 2020/04/08 14:09:01 martin Exp $");
     39        1.1     pooka 
     40        1.1     pooka #include <sys/param.h>
     41        1.1     pooka #include <sys/kernel.h>
     42        1.1     pooka #include <sys/kmem.h>
     43        1.1     pooka #include <sys/mutex.h>
     44        1.1     pooka #include <sys/rwlock.h>
     45        1.1     pooka 
     46       1.10     pooka #include <rump-sys/kern.h>
     47       1.10     pooka 
     48        1.1     pooka #include <rump/rumpuser.h>
     49        1.1     pooka 
/*
 * Backing object for a UP kmutex.  The kmutex_t itself only stores
 * a pointer to one of these (see UPMTX below and mutex_init()).
 */
struct upmtx {
	struct lwp *upm_owner;		/* lwp holding the mutex, or NULL */
	int upm_wanted;			/* number of waiters in mutex_enter() */
	struct rumpuser_cv *upm_rucv;	/* hypervisor cv waiters sleep on */
};
/* Fetch the backing upmtx out of a kmutex_t's pointer slot. */
#define UPMTX(mtx) struct upmtx *upm = *(struct upmtx **)mtx
     56        1.1     pooka 
     57        1.1     pooka static inline void
     58        1.1     pooka checkncpu(void)
     59        1.1     pooka {
     60        1.1     pooka 
     61        1.1     pooka 	if (__predict_false(ncpu != 1))
     62        1.1     pooka 		panic("UP lock implementation requires RUMP_NCPU == 1");
     63        1.1     pooka }
     64        1.1     pooka 
     65        1.1     pooka void
     66        1.1     pooka mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
     67        1.1     pooka {
     68        1.1     pooka 	struct upmtx *upm;
     69        1.1     pooka 
     70        1.1     pooka 	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));
     71        1.1     pooka 	checkncpu();
     72        1.1     pooka 
     73        1.1     pooka 	/*
     74        1.7     pooka 	 * In uniprocessor locking we don't need to differentiate
     75        1.7     pooka 	 * between spin mutexes and adaptive ones.  We could
     76        1.7     pooka 	 * replace mutex_enter() with a NOP for spin mutexes, but
     77        1.7     pooka 	 * not bothering with that for now.
     78        1.7     pooka 	 */
     79        1.7     pooka 
     80        1.7     pooka 	/*
     81        1.1     pooka 	 * XXX: pool_cache would be nice, but not easily possible,
     82        1.1     pooka 	 * as pool cache init wants to call mutex_init() ...
     83        1.1     pooka 	 */
     84        1.3     pooka 	upm = rump_hypermalloc(sizeof(*upm), 0, true, "mutex_init");
     85        1.1     pooka 	memset(upm, 0, sizeof(*upm));
     86        1.1     pooka 	rumpuser_cv_init(&upm->upm_rucv);
     87        1.1     pooka 	memcpy(mtx, &upm, sizeof(void *));
     88        1.1     pooka }
     89        1.1     pooka 
/*
 * Destroy a mutex.  Must be unowned and have no waiters;
 * releases the hypervisor cv and the backing memory.
 */
void
mutex_destroy(kmutex_t *mtx)
{
	UPMTX(mtx);

	KASSERT(upm->upm_owner == NULL);
	KASSERT(upm->upm_wanted == 0);
	rumpuser_cv_destroy(upm->upm_rucv);
	rump_hyperfree(upm, sizeof(*upm));
}
    100        1.1     pooka 
/*
 * Acquire a mutex, sleeping until it is available.  No atomic ops
 * or memory barriers are needed: the single virtual CPU is the
 * implicit interlock for all lock state.
 */
void
mutex_enter(kmutex_t *mtx)
{
	UPMTX(mtx);

	/* fastpath? */
	if (mutex_tryenter(mtx))
		return;

	/*
	 * No?  bummer, do it the slow and painful way then.
	 */
	upm->upm_wanted++;	/* tell mutex_exit() to wake us up */
	while (!mutex_tryenter(mtx)) {
		/* releases the virtual CPU while we sleep */
		rump_schedlock_cv_wait(upm->upm_rucv);
	}
	upm->upm_wanted--;

	KASSERT(upm->upm_wanted >= 0);
}
    121        1.1     pooka 
/*
 * Spin mutexes are not differentiated from adaptive ones in the
 * UP case; just take the mutex normally.
 */
void
mutex_spin_enter(kmutex_t *mtx)
{

	mutex_enter(mtx);
}
    128        1.1     pooka 
    129        1.1     pooka int
    130        1.1     pooka mutex_tryenter(kmutex_t *mtx)
    131        1.1     pooka {
    132        1.1     pooka 	UPMTX(mtx);
    133        1.1     pooka 
    134        1.1     pooka 	if (upm->upm_owner)
    135        1.1     pooka 		return 0;
    136        1.1     pooka 
    137        1.1     pooka 	upm->upm_owner = curlwp;
    138        1.1     pooka 	return 1;
    139        1.1     pooka }
    140        1.1     pooka 
/*
 * Release the mutex and wake up one waiter, if any.  Signalling
 * before clearing the owner is safe because holding the virtual
 * CPU is itself the interlock.
 */
void
mutex_exit(kmutex_t *mtx)
{
	UPMTX(mtx);

	if (upm->upm_wanted) {
		rumpuser_cv_signal(upm->upm_rucv); /* CPU is our interlock */
	}
	upm->upm_owner = NULL;
}
    151        1.1     pooka 
/*
 * Spin mutex release: same as the adaptive case in the UP
 * implementation.
 */
void
mutex_spin_exit(kmutex_t *mtx)
{

	mutex_exit(mtx);
}
    158        1.1     pooka 
    159        1.1     pooka int
    160        1.1     pooka mutex_owned(kmutex_t *mtx)
    161        1.1     pooka {
    162        1.1     pooka 	UPMTX(mtx);
    163        1.1     pooka 
    164        1.1     pooka 	return upm->upm_owner == curlwp;
    165        1.1     pooka }
    166        1.1     pooka 
/*
 * Return the lwp currently holding the mutex, or NULL if unowned.
 */
struct lwp *
mutex_owner(kmutex_t *mtx)
{
	UPMTX(mtx);

	return upm->upm_owner;
}
    174        1.5     pooka 
/*
 * Backing object for a UP rwlock.  Like kmutex_t, the krwlock_t
 * only stores a pointer to one of these (see UPRW and rw_init()).
 */
struct uprw {
	struct lwp *uprw_owner;		/* write lock holder, or NULL */
	int uprw_readers;		/* number of read lock holders */
	uint16_t uprw_rwant;		/* waiters wanting a read lock */
	uint16_t uprw_wwant;		/* waiters wanting the write lock */
	struct rumpuser_cv *uprw_rucv_reader;	/* readers sleep here */
	struct rumpuser_cv *uprw_rucv_writer;	/* writers sleep here */
};

/* Fetch the backing uprw out of a krwlock_t's pointer slot. */
#define UPRW(rw) struct uprw *uprw = *(struct uprw **)rw
    185        1.1     pooka 
    186        1.1     pooka /* reader/writer locks */
    187        1.1     pooka 
    188        1.1     pooka void
    189        1.1     pooka rw_init(krwlock_t *rw)
    190        1.1     pooka {
    191        1.1     pooka 	struct uprw *uprw;
    192        1.1     pooka 
    193        1.1     pooka 	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));
    194        1.1     pooka 	checkncpu();
    195        1.1     pooka 
    196        1.3     pooka 	uprw = rump_hypermalloc(sizeof(*uprw), 0, true, "rwinit");
    197        1.1     pooka 	memset(uprw, 0, sizeof(*uprw));
    198        1.1     pooka 	rumpuser_cv_init(&uprw->uprw_rucv_reader);
    199        1.1     pooka 	rumpuser_cv_init(&uprw->uprw_rucv_writer);
    200        1.1     pooka 	memcpy(rw, &uprw, sizeof(void *));
    201        1.1     pooka }
    202        1.1     pooka 
/*
 * Destroy a reader/writer lock: release both hypervisor condvars
 * and the backing memory.
 */
void
rw_destroy(krwlock_t *rw)
{
	UPRW(rw);

	rumpuser_cv_destroy(uprw->uprw_rucv_reader);
	rumpuser_cv_destroy(uprw->uprw_rucv_writer);
	rump_hyperfree(uprw, sizeof(*uprw));
}
    212        1.1     pooka 
    213        1.1     pooka /* take rwlock.  prefer writers over readers (see rw_tryenter and rw_exit) */
/* take rwlock.  prefer writers over readers (see rw_tryenter and rw_exit) */
void
rw_enter(krwlock_t *rw, const krw_t op)
{
	UPRW(rw);
	struct rumpuser_cv *rucv;
	uint16_t *wp;

	/* fastpath: uncontended */
	if (rw_tryenter(rw, op))
		return;

	/* lagpath */
	/* pick the cv and wanted-counter matching the requested op */
	if (op == RW_READER) {
		rucv = uprw->uprw_rucv_reader;
		wp = &uprw->uprw_rwant;
	} else {
		rucv = uprw->uprw_rucv_writer;
		wp = &uprw->uprw_wwant;
	}

	(*wp)++;	/* makes rw_exit() signal our cv */
	while (!rw_tryenter(rw, op)) {
		/* releases the virtual CPU while we sleep */
		rump_schedlock_cv_wait(rucv);
	}
	(*wp)--;
}
    239        1.1     pooka 
    240        1.1     pooka int
    241        1.1     pooka rw_tryenter(krwlock_t *rw, const krw_t op)
    242        1.1     pooka {
    243        1.1     pooka 	UPRW(rw);
    244        1.1     pooka 
    245        1.1     pooka 	switch (op) {
    246        1.1     pooka 	case RW_READER:
    247        1.1     pooka 		if (uprw->uprw_owner == NULL && uprw->uprw_wwant == 0) {
    248        1.1     pooka 			uprw->uprw_readers++;
    249        1.1     pooka 			return 1;
    250        1.1     pooka 		}
    251        1.1     pooka 		break;
    252        1.1     pooka 	case RW_WRITER:
    253        1.1     pooka 		if (uprw->uprw_owner == NULL && uprw->uprw_readers == 0) {
    254        1.1     pooka 			uprw->uprw_owner = curlwp;
    255        1.1     pooka 			return 1;
    256        1.1     pooka 		}
    257        1.1     pooka 		break;
    258        1.1     pooka 	}
    259        1.1     pooka 
    260        1.1     pooka 	return 0;
    261        1.1     pooka }
    262        1.1     pooka 
/*
 * Release the rwlock and wake up a waiter.  Writers are woken in
 * preference to readers, matching the admission policy in
 * rw_tryenter().
 */
void
rw_exit(krwlock_t *rw)
{
	UPRW(rw);

	if (uprw->uprw_readers > 0) {
		uprw->uprw_readers--;	/* dropping a read hold */
	} else {
		KASSERT(uprw->uprw_owner == curlwp);
		uprw->uprw_owner = NULL;	/* dropping the write hold */
	}

	/* CPU is the interlock; prefer waking writers */
	if (uprw->uprw_wwant) {
		rumpuser_cv_signal(uprw->uprw_rucv_writer);
	} else if (uprw->uprw_rwant) {
		rumpuser_cv_signal(uprw->uprw_rucv_reader);
	}
}
    281        1.1     pooka 
    282        1.1     pooka int
    283        1.1     pooka rw_tryupgrade(krwlock_t *rw)
    284        1.1     pooka {
    285        1.1     pooka 	UPRW(rw);
    286        1.1     pooka 
    287        1.1     pooka 	if (uprw->uprw_readers == 1 && uprw->uprw_owner == NULL) {
    288        1.1     pooka 		uprw->uprw_readers = 0;
    289        1.1     pooka 		uprw->uprw_owner = curlwp;
    290        1.1     pooka 		return 1;
    291        1.1     pooka 	} else {
    292        1.1     pooka 		return 0;
    293        1.1     pooka 	}
    294        1.1     pooka }
    295        1.1     pooka 
/*
 * Return non-zero iff the calling lwp holds the write lock.
 */
int
rw_write_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_owner == curlwp;
}
    303        1.1     pooka 
/*
 * Return non-zero iff the lock is read-held (by anyone; per-lwp
 * read ownership is not tracked).
 */
int
rw_read_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_readers > 0;
}
    311        1.1     pooka 
    312        1.1     pooka int
    313        1.1     pooka rw_lock_held(krwlock_t *rw)
    314        1.1     pooka {
    315        1.1     pooka 	UPRW(rw);
    316        1.1     pooka 
    317        1.1     pooka 	return uprw->uprw_owner || uprw->uprw_readers;
    318        1.1     pooka }
    319        1.1     pooka 
/*
 * Report the mode in which the caller holds the lock: RW_WRITER
 * if we own the write lock, RW_READER otherwise.
 */
krw_t
rw_lock_op(krwlock_t *rw)
{

	return rw_write_held(rw) ? RW_WRITER : RW_READER;
}
    326        1.1     pooka 
    327        1.1     pooka /*
    328        1.1     pooka  * Condvars are almost the same as in the MP case except that we
    329        1.1     pooka  * use the scheduler mutex as the pthread interlock instead of the
    330        1.1     pooka  * mutex associated with the condvar.
    331        1.1     pooka  */
    332        1.1     pooka 
    333        1.1     pooka #define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))
    334        1.1     pooka 
/*
 * Initialize a condvar by storing a hypervisor cv directly in
 * the kcondvar_t's pointer slot.  "msg" is unused here.
 */
void
cv_init(kcondvar_t *cv, const char *msg)
{

	/* the kcondvar_t must be able to hold the cv pointer */
	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));
	checkncpu();

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}
    344        1.1     pooka 
/*
 * Destroy a condvar: release the underlying hypervisor cv.
 */
void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}
    351        1.1     pooka 
/*
 * Wait on a condvar.  The caller must hold mtx; it is dropped
 * while sleeping and reacquired before returning.
 */
void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{
#ifdef DIAGNOSTIC
	UPMTX(mtx);
	KASSERT(upm->upm_owner == curlwp);

	/* with no other threads nobody could ever signal us */
	if (rump_threads == 0)
		panic("cv_wait without threads");
#endif

	/*
	 * NOTE: we must atomically release the *CPU* here, i.e.
	 * nothing between mutex_exit and entering rumpuser condwait
	 * may preempt us from the virtual CPU.
	 */
	mutex_exit(mtx);
	rump_schedlock_cv_wait(RUMPCV(cv));
	mutex_enter(mtx);
}
    372        1.1     pooka 
/*
 * Interruptible wait.  Signals are not delivered to rump kernel
 * threads, so this is just cv_wait() and always returns 0.
 */
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	cv_wait(cv, mtx);
	return 0;
}
    380        1.1     pooka 
    381        1.1     pooka int
    382        1.1     pooka cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
    383        1.1     pooka {
    384        1.8     pooka 	struct timespec ts;
    385        1.1     pooka 
    386        1.1     pooka #ifdef DIAGNOSTIC
    387        1.1     pooka 	UPMTX(mtx);
    388        1.1     pooka 	KASSERT(upm->upm_owner == curlwp);
    389        1.1     pooka #endif
    390        1.1     pooka 
    391        1.8     pooka 	ts.tv_sec = ticks / hz;
    392        1.8     pooka 	ts.tv_nsec = (ticks % hz) * (1000000000/hz);
    393        1.1     pooka 
    394        1.1     pooka 	if (ticks == 0) {
    395        1.1     pooka 		cv_wait(cv, mtx);
    396        1.1     pooka 		return 0;
    397        1.1     pooka 	} else {
    398        1.1     pooka 		int rv;
    399        1.1     pooka 		mutex_exit(mtx);
    400        1.1     pooka 		rv = rump_schedlock_cv_timedwait(RUMPCV(cv), &ts);
    401        1.1     pooka 		mutex_enter(mtx);
    402        1.1     pooka 		if (rv)
    403        1.1     pooka 			return EWOULDBLOCK;
    404        1.1     pooka 		else
    405        1.1     pooka 			return 0;
    406        1.1     pooka 	}
    407        1.1     pooka }
    408        1.1     pooka 
/*
 * Interruptible timed wait.  No signal delivery in a rump kernel,
 * so this is identical to cv_timedwait().
 */
int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

	return cv_timedwait(cv, mtx, ticks);
}
    415        1.1     pooka 
/*
 * Wake up one waiter.  No interlock needed: holding the virtual
 * CPU is the interlock.
 */
void
cv_signal(kcondvar_t *cv)
{

	/* CPU == interlock */
	rumpuser_cv_signal(RUMPCV(cv));
}
    423        1.1     pooka 
/*
 * Wake up all waiters.  No interlock needed: holding the virtual
 * CPU is the interlock.
 */
void
cv_broadcast(kcondvar_t *cv)
{

	/* CPU == interlock */
	rumpuser_cv_broadcast(RUMPCV(cv));
}
    431        1.1     pooka 
/*
 * Return true iff somebody is currently sleeping on the condvar;
 * the count is queried from the hypervisor.
 */
bool
cv_has_waiters(kcondvar_t *cv)
{
	int n;

	rumpuser_cv_has_waiters(RUMPCV(cv), &n);

	return n > 0;
}
    441        1.1     pooka 
/* this is not much of an attempt, but ... */
/* A condvar is considered valid if its hypervisor cv pointer is set. */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}
    449