/*	$NetBSD: spinlock.h,v 1.15 2022/10/25 23:33:29 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SPINLOCK_H_
#define _LINUX_SPINLOCK_H_

#include <sys/cdefs.h>
#include <sys/mutex.h>

#include <machine/limits.h>

#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>

/*
 * Linux spinlock_t, implemented as a NetBSD kmutex(9).  It is
 * initialized by spin_lock_init at IPL_VM, so it may be shared with
 * interrupt handlers at or below that priority.
 */
typedef struct spinlock {
	kmutex_t sl_lock;
} spinlock_t;

static inline int
spin_is_locked(spinlock_t *spinlock)
{
	return mutex_owned(&spinlock->sl_lock);
}

static inline void
spin_lock(spinlock_t *spinlock)
{
	mutex_enter(&spinlock->sl_lock);
}

static inline void
spin_unlock(spinlock_t *spinlock)
{
	mutex_exit(&spinlock->sl_lock);
}

/*
 * The _bh and _irq variants are aliases for plain spin_lock and
 * spin_unlock: since these locks are initialized at IPL_VM, holding
 * one already blocks soft interrupts and hard interrupts up to that
 * priority on the current CPU.
 */
static inline void
spin_lock_bh(spinlock_t *spinlock)
{
	spin_lock(spinlock);
}

static inline void
spin_unlock_bh(spinlock_t *spinlock)
{
	spin_unlock(spinlock);
}

static inline void
spin_lock_irq(spinlock_t *spinlock)
{
	spin_lock(spinlock);
}

static inline void
spin_unlock_irq(spinlock_t *spinlock)
{
	spin_unlock(spinlock);
}

/* Must be a macro because the second argument is to be assigned.  */
#define	spin_lock_irqsave(SPINLOCK, FLAGS)				\
	do {								\
		(FLAGS) = 0;						\
		mutex_enter(&((spinlock_t *)(SPINLOCK))->sl_lock);	\
	} while (0)

#define	spin_trylock_irqsave(SPINLOCK, FLAGS)				\
	( (FLAGS) = 0,							\
	mutex_tryenter(&((spinlock_t *)(SPINLOCK))->sl_lock) )

static inline void
spin_unlock_irqrestore(spinlock_t *spinlock, unsigned long __unused flags)
{
	mutex_exit(&spinlock->sl_lock);
}

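/*
 * Illustrative usage (the `sc' softc and its `sc_lock' member are
 * hypothetical, not part of this header):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&sc->sc_lock, flags);
 *	... critical section ...
 *	spin_unlock_irqrestore(&sc->sc_lock, flags);
 *
 * The flags are dummies in this implementation (irqsave stores 0 and
 * irqrestore ignores its argument) because mutex_enter/mutex_exit on
 * a spin kmutex raise and restore the SPL themselves.
 */
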
/*
 * Lockdep subclasses are not tracked here, so the _nested variants
 * simply discard the subclass.
 */
static inline void
spin_lock_nested(spinlock_t *spinlock, int subclass)
{
	spin_lock(spinlock);
}

#define	spin_lock_irqsave_nested(SPINLOCK, FLAGS, SUBCLASS)		      \
	spin_lock_irqsave(SPINLOCK, FLAGS)

static inline void
spin_lock_init(spinlock_t *spinlock)
{
	/* XXX What's the right IPL?  IPL_DRM...?  */
	mutex_init(&spinlock->sl_lock, MUTEX_DEFAULT, IPL_VM);
}

/*
 * XXX Linux doesn't ever destroy spin locks, it seems.  We'll have to
 * kludge it up.
 */

static inline void
spin_lock_destroy(spinlock_t *spinlock)
{
	mutex_destroy(&spinlock->sl_lock);
}

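/*
 * Illustrative lifecycle (hypothetical `sc' again).  spin_lock_destroy
 * is the NetBSD-side kludge mentioned above; Linux callers never
 * destroy their spin locks, so destroy calls must be added by hand:
 *
 *	spin_lock_init(&sc->sc_lock);
 *	...
 *	spin_lock(&sc->sc_lock);
 *	... critical section ...
 *	spin_unlock(&sc->sc_lock);
 *	...
 *	spin_lock_destroy(&sc->sc_lock);
 */
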
/* This is a macro to make the panic message clearer.  */
#define	assert_spin_locked(spinlock)	\
	KASSERT(mutex_owned(&(spinlock)->sl_lock))

/*
 * Stupid reader/writer spin locks.  No attempt to avoid writer
 * starvation.  Must allow recursive readers.  We use mutex and state
 * instead of compare-and-swap for expedience and LOCKDEBUG support.
 */

typedef struct linux_rwlock {
	kmutex_t	rw_lock;
	unsigned	rw_nreaders;
} rwlock_t;

static inline void
rwlock_init(rwlock_t *rw)
{

	mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
	rw->rw_nreaders = 0;
}

static inline void
rwlock_destroy(rwlock_t *rw)
{

	KASSERTMSG(rw->rw_nreaders == 0,
	    "rwlock still held by %u readers", rw->rw_nreaders);
	mutex_destroy(&rw->rw_lock);
}

static inline void
write_lock_irq(rwlock_t *rw)
{

	/*
	 * Take the mutex, but keep it only if there are no readers;
	 * otherwise drop it and retry until the readers have drained.
	 * The mutex is then held for the whole write section.
	 */
	for (;;) {
		mutex_spin_enter(&rw->rw_lock);
		if (rw->rw_nreaders == 0)
			break;
		mutex_spin_exit(&rw->rw_lock);
	}
}

static inline void
write_unlock_irq(rwlock_t *rw)
{

	KASSERT(rw->rw_nreaders == 0);
	mutex_spin_exit(&rw->rw_lock);
}

/*
 * Readers take the mutex only long enough to adjust rw_nreaders, so a
 * read section holds no mutex at all; that is what makes recursive
 * readers safe.
 */
static inline void
read_lock(rwlock_t *rw)
{

	mutex_spin_enter(&rw->rw_lock);
	KASSERT(rw->rw_nreaders < UINT_MAX);
	rw->rw_nreaders++;
	mutex_spin_exit(&rw->rw_lock);
}

static inline void
read_unlock(rwlock_t *rw)
{

	mutex_spin_enter(&rw->rw_lock);
	KASSERT(0 < rw->rw_nreaders);
	rw->rw_nreaders--;
	mutex_spin_exit(&rw->rw_lock);
}

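/*
 * Illustrative usage (hypothetical `sc' with an rwlock_t member):
 *
 *	read_lock(&sc->sc_rwlock);
 *	... look something up ...
 *	read_unlock(&sc->sc_rwlock);
 *
 *	write_lock_irq(&sc->sc_rwlock);
 *	... modify the shared state ...
 *	write_unlock_irq(&sc->sc_rwlock);
 *
 * A reader may take the read lock again while already holding it; a
 * writer spins until all readers are gone and then excludes everyone
 * else until write_unlock_irq.
 */
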
/*
 * Bottom halves (Linux soft interrupts) need no extra handling in
 * this implementation, so these are no-ops.
 */
static inline void
local_bh_disable(void)
{
}

static inline void
local_bh_enable(void)
{
}

/* Must be a macro so the address of the caller's flags can be taken.  */
#define	atomic_dec_and_lock_irqsave(A, L, F)				      \
	_atomic_dec_and_lock_irqsave(A, L, &(F))

/*
 * Decrement atomic.  If the count thereby drops to zero, return true
 * with the lock held; otherwise return false with the lock not held.
 * The lock is taken only for the final decrement, so the common case
 * remains a lock-free compare-and-swap.
 */
static inline bool __must_check
_atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
    unsigned long *flagsp)
{
	unsigned old, new;

	do {
		old = atomic_read(atomic);
		KASSERT(old);
		if (old == 1) {
			/* Possibly the last reference: decrement under
			 * the lock. */
			spin_lock_irqsave(lock, *flagsp);
			if (atomic_dec_return(atomic) == 0)
				return true;
			spin_unlock_irqrestore(lock, *flagsp);
			return false;
		}
		new = old - 1;
	} while (atomic_cmpxchg(atomic, old, new) != old);

	KASSERT(old != 1);
	KASSERT(new != 0);
	return false;
}

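/*
 * Illustrative usage (hypothetical `obj' with a reference count and a
 * lock protecting the list it lives on): dropping the last reference
 * to an object that must be unlinked exactly once.
 *
 *	unsigned long flags;
 *
 *	if (atomic_dec_and_lock_irqsave(&obj->refcnt, &obj->list_lock,
 *		flags)) {
 *		... unlink obj from the list ...
 *		spin_unlock_irqrestore(&obj->list_lock, flags);
 *		... free obj ...
 *	}
 *
 * On a true return the count is zero and the caller must release the
 * lock; on a false return the count was merely decremented and the
 * lock is not held.
 */
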
#endif  /* _LINUX_SPINLOCK_H_ */