/*	$NetBSD: kern_mutex_obj.c,v 1.9 2022/04/09 23:38:33 riastradh Exp $	*/

/*-
 * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.9 2022/04/09 23:38:33 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/pool.h>

/* Mutex cache */
#define	MUTEX_OBJ_MAGIC	0x5aa3c85d
struct kmutexobj {
	kmutex_t	mo_lock;	/* the lock itself */
	u_int		mo_magic;	/* MUTEX_OBJ_MAGIC while valid */
	u_int		mo_refcnt;	/* number of references held */
};

static int	mutex_obj_ctor(void *, void *, int);

static pool_cache_t	mutex_obj_cache		__read_mostly;

/*
 * mutex_obj_init:
 *
 *	Initialize the mutex object store.
 */
void
mutex_obj_init(void)
{

	mutex_obj_cache = pool_cache_init(sizeof(struct kmutexobj),
	    coherency_unit, 0, 0, "mutex", NULL, IPL_NONE, mutex_obj_ctor,
	    NULL, NULL);
}

/*
 * mutex_obj_ctor:
 *
 *	Initialize a new lock for the cache.
 */
static int
mutex_obj_ctor(void *arg, void *obj, int flags)
{
	struct kmutexobj *mo = obj;

	mo->mo_magic = MUTEX_OBJ_MAGIC;

	return 0;
}

/*
 * mutex_obj_alloc:
 *
 *	Allocate a single lock object, waiting for memory if needed.
 */
kmutex_t *
mutex_obj_alloc(kmutex_type_t type, int ipl)
{
	struct kmutexobj *mo;
	extern void _mutex_init(kmutex_t *, kmutex_type_t, int, uintptr_t);

	mo = pool_cache_get(mutex_obj_cache, PR_WAITOK);
	_mutex_init(&mo->mo_lock, type, ipl,
	    (uintptr_t)__builtin_return_address(0));
	mo->mo_refcnt = 1;

	return (kmutex_t *)mo;
}

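/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller stores the returned pointer in its own structure and then uses
 * it like any other kmutex_t.  The softc structure and field names below
 * are hypothetical.
 *
 *	struct example_softc {
 *		kmutex_t	*sc_lock;
 *	};
 *
 *	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(sc->sc_lock);
 *	...
 *	mutex_exit(sc->sc_lock);
 */
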
/*
 * mutex_obj_tryalloc:
 *
 *	Allocate a single lock object, failing if no memory is available.
 */
kmutex_t *
mutex_obj_tryalloc(kmutex_type_t type, int ipl)
{
	struct kmutexobj *mo;
	extern void _mutex_init(kmutex_t *, kmutex_type_t, int, uintptr_t);

	mo = pool_cache_get(mutex_obj_cache, PR_NOWAIT);
	if (__predict_true(mo != NULL)) {
		_mutex_init(&mo->mo_lock, type, ipl,
		    (uintptr_t)__builtin_return_address(0));
		mo->mo_refcnt = 1;
	}

	return (kmutex_t *)mo;
}

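/*
 * Usage sketch (illustrative only): unlike mutex_obj_alloc(),
 * mutex_obj_tryalloc() can return NULL, so callers on paths that must
 * not sleep check for failure.  The error handling shown is hypothetical.
 *
 *	kmutex_t *lock;
 *
 *	lock = mutex_obj_tryalloc(MUTEX_DEFAULT, IPL_SOFTNET);
 *	if (lock == NULL)
 *		return ENOMEM;
 */
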
/*
 * mutex_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
mutex_obj_hold(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERTMSG(mo->mo_magic == MUTEX_OBJ_MAGIC,
	    "%s: lock %p: mo->mo_magic (%#x) != MUTEX_OBJ_MAGIC (%#x)",
	     __func__, mo, mo->mo_magic, MUTEX_OBJ_MAGIC);
	KASSERTMSG(mo->mo_refcnt > 0,
	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
	     __func__, mo, mo->mo_refcnt);

	atomic_inc_uint(&mo->mo_refcnt);
}

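/*
 * Usage sketch (illustrative only): when a second structure is set up to
 * share an existing lock, its creator takes an extra reference while
 * still holding one of its own.  Structure and field names are
 * hypothetical.
 *
 *	new_sc->sc_lock = old_sc->sc_lock;
 *	mutex_obj_hold(new_sc->sc_lock);
 */
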
/*
 * mutex_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
bool
mutex_obj_free(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERTMSG(mo->mo_magic == MUTEX_OBJ_MAGIC,
	    "%s: lock %p: mo->mo_magic (%#x) != MUTEX_OBJ_MAGIC (%#x)",
	     __func__, mo, mo->mo_magic, MUTEX_OBJ_MAGIC);
	KASSERTMSG(mo->mo_refcnt > 0,
	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
	     __func__, mo, mo->mo_refcnt);

	/*
	 * Release ordering: make this thread's prior stores visible
	 * before the reference is dropped.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_release();
#endif
	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
		return false;
	}
	/*
	 * Acquire ordering: the last reference is gone, so pair with the
	 * release above before tearing the object down.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_acquire();
#endif
	mutex_destroy(&mo->mo_lock);
	pool_cache_put(mutex_obj_cache, mo);
	return true;
}

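/*
 * Usage sketch (illustrative only): each holder drops its reference at
 * teardown; the mutex is destroyed and returned to the cache only when
 * the final reference goes away.  The detach routine is hypothetical.
 *
 *	void
 *	example_detach(struct example_softc *sc)
 *	{
 *
 *		KASSERT(!mutex_owned(sc->sc_lock));
 *		mutex_obj_free(sc->sc_lock);
 *		sc->sc_lock = NULL;
 *	}
 */
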
/*
 * mutex_obj_refcnt:
 *
 *	Return the reference count on a lock object.
 */
u_int
mutex_obj_refcnt(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	return mo->mo_refcnt;
}
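
/*
 * Usage sketch (illustrative only): the returned value is an unlocked
 * snapshot, so one plausible use is a diagnostic assertion, e.g.
 *
 *	KASSERT(mutex_obj_refcnt(sc->sc_lock) > 0);
 */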