/*	$NetBSD: kern_rwlock_obj.c,v 1.5 2020/01/01 21:34:39 ad Exp $	*/

/*-
 * Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.5 2020/01/01 21:34:39 ad Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/pool.h>
#include <sys/rwlock.h>

/* Rwlock cache */
#define	RW_OBJ_MAGIC	0x85d3c85d
struct krwobj {
	krwlock_t	ro_lock;
	u_int		ro_magic;
	u_int		ro_refcnt;
};
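
/*
 * Layout note: ro_lock must remain the first member.  The functions
 * below hand the object out as a krwlock_t * by casting the whole
 * struct, which is only valid while ro_lock sits at offset zero.
 * An illustrative compile-time check (not in the original file):
 *
 *	CTASSERT(offsetof(struct krwobj, ro_lock) == 0);
 */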

static int	rw_obj_ctor(void *, void *, int);

static pool_cache_t	rw_obj_cache	__read_mostly;

/*
 * rw_obj_init:
 *
 *	Initialize the rw object store.
 */
void
rw_obj_init(void)
{

	rw_obj_cache = pool_cache_init(sizeof(struct krwobj),
	    coherency_unit, 0, 0, "rwlock", NULL, IPL_NONE, rw_obj_ctor,
	    NULL, NULL);
}
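
/*
 * Note: the cache aligns objects to coherency_unit (the maximum CPU
 * cache line size), so no two lock objects share a cache line and a
 * heavily contended lock cannot false-share with its neighbour.
 */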

/*
 * rw_obj_ctor:
 *
 *	Initialize a new lock for the cache.
 */
static int
rw_obj_ctor(void *arg, void *obj, int flags)
{
	struct krwobj *ro = obj;

	ro->ro_magic = RW_OBJ_MAGIC;

	return 0;
}
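
/*
 * The constructor runs only when the pool cache constructs fresh
 * objects, so ro_magic is set once and then persists across
 * rw_obj_alloc()/rw_obj_free() cycles; the embedded rwlock itself
 * is re-initialized on every allocation.
 */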

/*
 * rw_obj_alloc:
 *
 *	Allocate a single lock object, waiting for memory if needed.
 */
krwlock_t *
rw_obj_alloc(void)
{
	struct krwobj *ro;
	extern void _rw_init(krwlock_t *, uintptr_t);

	ro = pool_cache_get(rw_obj_cache, PR_WAITOK);
	_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
	ro->ro_refcnt = 1;

	return (krwlock_t *)ro;
}
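
/*
 * The returned pointer can be used anywhere a plain krwlock_t * is
 * expected.  A minimal usage sketch (illustrative only):
 *
 *	krwlock_t *lock = rw_obj_alloc();
 *
 *	rw_enter(lock, RW_WRITER);
 *	... critical section ...
 *	rw_exit(lock);
 */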

/*
 * rw_obj_tryalloc:
 *
 *	Allocate a single lock object, but fail if no memory is available.
 */
krwlock_t *
rw_obj_tryalloc(void)
{
	struct krwobj *ro;
	extern void _rw_init(krwlock_t *, uintptr_t);

	ro = pool_cache_get(rw_obj_cache, PR_NOWAIT);
	if (__predict_true(ro != NULL)) {
		_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
		ro->ro_refcnt = 1;
	}

	return (krwlock_t *)ro;
}
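
/*
 * Unlike rw_obj_alloc(), this can return NULL, so callers must be
 * prepared to back out.  A sketch of a hypothetical caller:
 *
 *	if ((sc->sc_lock = rw_obj_tryalloc()) == NULL)
 *		return ENOMEM;
 */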

/*
 * rw_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
rw_obj_hold(krwlock_t *lock)
{
	struct krwobj *ro = (struct krwobj *)lock;

	KASSERT(ro->ro_magic == RW_OBJ_MAGIC);
	KASSERT(ro->ro_refcnt > 0);

	atomic_inc_uint(&ro->ro_refcnt);
}

/*
 * rw_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
bool
rw_obj_free(krwlock_t *lock)
{
	struct krwobj *ro = (struct krwobj *)lock;

	KASSERT(ro->ro_magic == RW_OBJ_MAGIC);
	KASSERT(ro->ro_refcnt > 0);

	if (atomic_dec_uint_nv(&ro->ro_refcnt) > 0) {
		return false;
	}
	rw_destroy(&ro->ro_lock);
	pool_cache_put(rw_obj_cache, ro);
	return true;
}
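
/*
 * Typical lifecycle when one lock protects two structures (a purely
 * illustrative sketch; "foo" and "bar" are hypothetical):
 *
 *	foo->f_lock = rw_obj_alloc();		reference count 1
 *	rw_obj_hold(foo->f_lock);		reference count 2
 *	bar->b_lock = foo->f_lock;
 *	...
 *	rw_obj_free(bar->b_lock);		returns false, count 1
 *	rw_obj_free(foo->f_lock);		returns true, lock destroyed
 */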

/*
 * rw_obj_refcnt:
 *
 *	Return the reference count for a lock object.  The count is read
 *	without synchronization, so it is a snapshot that may be stale by
 *	the time the caller examines it.
 */
u_int
rw_obj_refcnt(krwlock_t *lock)
{
	struct krwobj *ro = (struct krwobj *)lock;

	return ro->ro_refcnt;
}