/*	$NetBSD: spinlock.h,v 1.7 2016/04/13 08:43:56 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SPINLOCK_H_
#define _LINUX_SPINLOCK_H_

#include <sys/cdefs.h>
#include <sys/mutex.h>

#include <machine/limits.h>

#define	__acquires(lock)	/* XXX lockdep stuff */
#define	__releases(lock)	/* XXX lockdep stuff */

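/*
 * Illustrative sketch, not part of the original header: in Linux code these
 * annotations mark functions that return with a lock acquired or released,
 * for lockdep's benefit, e.g.
 *
 *	void frob_begin(struct frob *f) __acquires(&f->f_lock);
 *	void frob_end(struct frob *f) __releases(&f->f_lock);
 *
 * Here they expand to nothing, since NetBSD has no lockdep; "struct frob"
 * and its f_lock member are hypothetical names used only for illustration.
 */
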
typedef struct spinlock {
	kmutex_t sl_lock;
} spinlock_t;

static inline int
spin_is_locked(spinlock_t *spinlock)
{
	return mutex_owned(&spinlock->sl_lock);
}

static inline void
spin_lock(spinlock_t *spinlock)
{
	mutex_enter(&spinlock->sl_lock);
}

static inline void
spin_unlock(spinlock_t *spinlock)
{
	mutex_exit(&spinlock->sl_lock);
}

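/*
 * Illustrative sketch, not part of the original header, assuming a
 * hypothetical softc with a spinlock_t sc_lock protecting sc_count:
 *
 *	spin_lock(&sc->sc_lock);
 *	sc->sc_count++;
 *	spin_unlock(&sc->sc_lock);
 *
 * sl_lock is initialized at IPL_VM (see spin_lock_init below), so it is a
 * spin mutex and the critical section must not sleep.
 */
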
static inline void
spin_lock_irq(spinlock_t *spinlock)
{
	spin_lock(spinlock);
}

static inline void
spin_unlock_irq(spinlock_t *spinlock)
{
	spin_unlock(spinlock);
}

/* Must be a macro because the second argument is to be assigned.  */
#define	spin_lock_irqsave(SPINLOCK, FLAGS)				\
	do {								\
		(FLAGS) = 0;						\
		mutex_enter(&((spinlock_t *)(SPINLOCK))->sl_lock);	\
	} while (0)

static inline void
spin_unlock_irqrestore(spinlock_t *spinlock, unsigned long __unused flags)
{
	mutex_exit(&spinlock->sl_lock);
}

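/*
 * Illustrative sketch, not part of the original header: callers keep the
 * usual Linux flags pattern even though the flags value is unused here.
 * With a hypothetical softc sc:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&sc->sc_lock, flags);
 *	sc->sc_pending = true;
 *	spin_unlock_irqrestore(&sc->sc_lock, flags);
 */
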
static inline void
spin_lock_init(spinlock_t *spinlock)
{
	/* XXX What's the right IPL?  IPL_DRM...?  */
	mutex_init(&spinlock->sl_lock, MUTEX_DEFAULT, IPL_VM);
}

/*
 * XXX Linux doesn't ever destroy spin locks, it seems.  We'll have to
 * kludge it up.
 */

static inline void
spin_lock_destroy(spinlock_t *spinlock)
{
	mutex_destroy(&spinlock->sl_lock);
}

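/*
 * Illustrative sketch, not part of the original header: unlike Linux, this
 * port pairs init with destroy, typically across a hypothetical driver's
 * attach and detach routines:
 *
 *	spin_lock_init(&sc->sc_lock);		(in foo_attach)
 *	...
 *	spin_lock_destroy(&sc->sc_lock);	(in foo_detach)
 */
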
/* This is a macro to make the panic message clearer.  */
#define	assert_spin_locked(spinlock)	\
	KASSERT(mutex_owned(&(spinlock)->sl_lock))

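/*
 * Illustrative sketch, not part of the original header: a helper that
 * requires its caller to hold the lock can document and enforce that
 * with assert_spin_locked.  The softc and field names are hypothetical:
 *
 *	static void
 *	frob_update_locked(struct frob_softc *sc)
 *	{
 *		assert_spin_locked(&sc->sc_lock);
 *		sc->sc_state = FROB_BUSY;
 *	}
 *
 * Because it is a macro, the KASSERT expansion names the caller's actual
 * lock expression in the panic message.
 */
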
/*
 * Stupid reader/writer spin locks.  No attempt to avoid writer
 * starvation.  Must allow recursive readers.  We use mutex and state
 * instead of compare-and-swap for expedience and LOCKDEBUG support.
 */

typedef struct linux_rwlock {
	kmutex_t	rw_lock;
	unsigned	rw_nreaders;
} rwlock_t;

static inline void
rwlock_init(rwlock_t *rw)
{

	mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
	rw->rw_nreaders = 0;
}

static inline void
rwlock_destroy(rwlock_t *rw)
{

	KASSERTMSG(rw->rw_nreaders == 0,
	    "rwlock still held by %u readers", rw->rw_nreaders);
	mutex_destroy(&rw->rw_lock);
}

static inline void
write_lock_irq(rwlock_t *rw)
{

	for (;;) {
		mutex_spin_enter(&rw->rw_lock);
		if (rw->rw_nreaders == 0)
			break;
		mutex_spin_exit(&rw->rw_lock);
	}
}

static inline void
write_unlock_irq(rwlock_t *rw)
{

	KASSERT(rw->rw_nreaders == 0);
	mutex_spin_exit(&rw->rw_lock);
}

static inline void
read_lock(rwlock_t *rw)
{

	mutex_spin_enter(&rw->rw_lock);
	KASSERT(rw->rw_nreaders < UINT_MAX);
	rw->rw_nreaders++;
	mutex_spin_exit(&rw->rw_lock);
}

static inline void
read_unlock(rwlock_t *rw)
{

	mutex_spin_enter(&rw->rw_lock);
	KASSERT(0 < rw->rw_nreaders);
	rw->rw_nreaders--;
	mutex_spin_exit(&rw->rw_lock);
}

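/*
 * Illustrative sketch, not part of the original header: readers may nest,
 * while a writer spins until the reader count drains to zero and then holds
 * rw_lock itself.  With a hypothetical list protected by rwlock_t
 * sc_list_lock:
 *
 *	read_lock(&sc->sc_list_lock);
 *	TAILQ_FOREACH(np, &sc->sc_list, n_entry)
 *		total += np->n_value;
 *	read_unlock(&sc->sc_list_lock);
 *
 *	write_lock_irq(&sc->sc_list_lock);
 *	TAILQ_INSERT_TAIL(&sc->sc_list, np, n_entry);
 *	write_unlock_irq(&sc->sc_list_lock);
 *
 * Note that a thread must not call write_lock_irq while it still holds the
 * read lock: rw_nreaders can never reach zero, so the writer spins forever.
 */
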
#endif  /* _LINUX_SPINLOCK_H_ */