Home | History | Annotate | Line # | Download | only in linux
      1  1.15  riastrad /*	$NetBSD: spinlock.h,v 1.15 2022/10/25 23:33:29 riastradh Exp $	*/
      2   1.2  riastrad 
      3   1.2  riastrad /*-
      4   1.2  riastrad  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5   1.2  riastrad  * All rights reserved.
      6   1.2  riastrad  *
      7   1.2  riastrad  * This code is derived from software contributed to The NetBSD Foundation
      8   1.2  riastrad  * by Taylor R. Campbell.
      9   1.2  riastrad  *
     10   1.2  riastrad  * Redistribution and use in source and binary forms, with or without
     11   1.2  riastrad  * modification, are permitted provided that the following conditions
     12   1.2  riastrad  * are met:
     13   1.2  riastrad  * 1. Redistributions of source code must retain the above copyright
     14   1.2  riastrad  *    notice, this list of conditions and the following disclaimer.
     15   1.2  riastrad  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.2  riastrad  *    notice, this list of conditions and the following disclaimer in the
     17   1.2  riastrad  *    documentation and/or other materials provided with the distribution.
     18   1.2  riastrad  *
     19   1.2  riastrad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20   1.2  riastrad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21   1.2  riastrad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22   1.2  riastrad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23   1.2  riastrad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24   1.2  riastrad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25   1.2  riastrad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26   1.2  riastrad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27   1.2  riastrad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28   1.2  riastrad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29   1.2  riastrad  * POSSIBILITY OF SUCH DAMAGE.
     30   1.2  riastrad  */
     31   1.2  riastrad 
     32   1.2  riastrad #ifndef _LINUX_SPINLOCK_H_
     33   1.2  riastrad #define _LINUX_SPINLOCK_H_
     34   1.2  riastrad 
     35   1.2  riastrad #include <sys/cdefs.h>
     36   1.2  riastrad #include <sys/mutex.h>
     37   1.2  riastrad 
     38   1.7  riastrad #include <machine/limits.h>
     39   1.9  riastrad 
     40  1.14  riastrad #include <linux/atomic.h>
     41   1.8  riastrad #include <linux/irqflags.h>
     42   1.9  riastrad #include <linux/lockdep.h>
     43   1.8  riastrad #include <linux/preempt.h>
     44   1.7  riastrad 
/*
 * Linux spinlock_t, backed by a NetBSD kmutex.  Initialize with
 * spin_lock_init; unlike Linux, must be paired with spin_lock_destroy.
 */
typedef struct spinlock {
	kmutex_t sl_lock;
} spinlock_t;
     48   1.2  riastrad 
     49   1.2  riastrad static inline int
     50   1.2  riastrad spin_is_locked(spinlock_t *spinlock)
     51   1.2  riastrad {
     52   1.2  riastrad 	return mutex_owned(&spinlock->sl_lock);
     53   1.2  riastrad }
     54   1.2  riastrad 
     55   1.2  riastrad static inline void
     56   1.2  riastrad spin_lock(spinlock_t *spinlock)
     57   1.2  riastrad {
     58   1.2  riastrad 	mutex_enter(&spinlock->sl_lock);
     59   1.2  riastrad }
     60   1.2  riastrad 
     61   1.2  riastrad static inline void
     62   1.2  riastrad spin_unlock(spinlock_t *spinlock)
     63   1.2  riastrad {
     64   1.2  riastrad 	mutex_exit(&spinlock->sl_lock);
     65   1.2  riastrad }
     66   1.2  riastrad 
     67   1.2  riastrad static inline void
     68  1.15  riastrad spin_lock_bh(spinlock_t *spinlock)
     69  1.15  riastrad {
     70  1.15  riastrad 	spin_lock(spinlock);
     71  1.15  riastrad }
     72  1.15  riastrad 
     73  1.15  riastrad static inline void
     74  1.15  riastrad spin_unlock_bh(spinlock_t *spinlock)
     75  1.15  riastrad {
     76  1.15  riastrad 	spin_unlock(spinlock);
     77  1.15  riastrad }
     78  1.15  riastrad 
     79  1.15  riastrad static inline void
     80   1.2  riastrad spin_lock_irq(spinlock_t *spinlock)
     81   1.2  riastrad {
     82   1.2  riastrad 	spin_lock(spinlock);
     83   1.2  riastrad }
     84   1.2  riastrad 
     85   1.2  riastrad static inline void
     86   1.2  riastrad spin_unlock_irq(spinlock_t *spinlock)
     87   1.2  riastrad {
     88   1.2  riastrad 	spin_unlock(spinlock);
     89   1.2  riastrad }
     90   1.2  riastrad 
/*
 * spin_lock_irqsave(SPINLOCK, FLAGS)
 *
 *	Acquire the lock and save the interrupt state in FLAGS.  FLAGS
 *	is a dummy, always zero -- the kmutex manages interrupt
 *	priority itself.  Must be a macro because the second argument
 *	is to be assigned.
 */
#define	spin_lock_irqsave(SPINLOCK, FLAGS)				\
	do {								\
		(FLAGS) = 0;						\
		mutex_enter(&((spinlock_t *)(SPINLOCK))->sl_lock);	\
	} while (0)
     97   1.2  riastrad 
/*
 * spin_trylock_irqsave(SPINLOCK, FLAGS)
 *
 *	Try to acquire the lock without waiting; nonzero iff acquired.
 *	FLAGS is a dummy, assigned zero whether or not the lock was
 *	taken.
 */
#define	spin_trylock_irqsave(SPINLOCK, FLAGS)				\
		( (FLAGS) = 0,						\
		mutex_tryenter(&((spinlock_t *)(SPINLOCK))->sl_lock) )
    101  1.13  riastrad 
    102   1.2  riastrad static inline void
    103   1.2  riastrad spin_unlock_irqrestore(spinlock_t *spinlock, unsigned long __unused flags)
    104   1.2  riastrad {
    105   1.2  riastrad 	mutex_exit(&spinlock->sl_lock);
    106   1.2  riastrad }
    107   1.2  riastrad 
    108   1.2  riastrad static inline void
    109  1.10  riastrad spin_lock_nested(spinlock_t *spinlock, int subclass)
    110  1.10  riastrad {
    111  1.10  riastrad 	spin_lock(spinlock);
    112  1.10  riastrad }
    113  1.10  riastrad 
/*
 * spin_lock_irqsave_nested(SPINLOCK, FLAGS, SUBCLASS)
 *
 *	Like spin_lock_irqsave, with a Linux lockdep subclass hint.
 *	SUBCLASS is unused in this implementation.
 */
#define	spin_lock_irqsave_nested(SPINLOCK, FLAGS, SUBCLASS)		      \
	spin_lock_irqsave(SPINLOCK, FLAGS)
    116  1.11  riastrad 
/*
 * spin_lock_init(spinlock)
 *
 *	Initialize a spin lock for use.  Unlike Linux, locks
 *	initialized here must eventually be passed to
 *	spin_lock_destroy.
 */
static inline void
spin_lock_init(spinlock_t *spinlock)
{
	/* XXX What's the right IPL?  IPL_DRM...?  */
	mutex_init(&spinlock->sl_lock, MUTEX_DEFAULT, IPL_VM);
}
    123   1.2  riastrad 
    124   1.2  riastrad /*
    125   1.2  riastrad  * XXX Linux doesn't ever destroy spin locks, it seems.  We'll have to
    126   1.2  riastrad  * kludge it up.
    127   1.2  riastrad  */
    128   1.2  riastrad 
    129   1.2  riastrad static inline void
    130   1.2  riastrad spin_lock_destroy(spinlock_t *spinlock)
    131   1.2  riastrad {
    132   1.2  riastrad 	mutex_destroy(&spinlock->sl_lock);
    133   1.2  riastrad }
    134   1.2  riastrad 
/*
 * assert_spin_locked(spinlock)
 *
 *	Assert that the caller holds the spin lock.  This is a macro
 *	to make the panic message clearer.
 */
#define	assert_spin_locked(spinlock)	\
	KASSERT(mutex_owned(&(spinlock)->sl_lock))
    138   1.2  riastrad 
    139   1.3  riastrad /*
    140   1.7  riastrad  * Stupid reader/writer spin locks.  No attempt to avoid writer
    141   1.7  riastrad  * starvation.  Must allow recursive readers.  We use mutex and state
    142   1.7  riastrad  * instead of compare-and-swap for expedience and LOCKDEBUG support.
    143   1.3  riastrad  */
    144   1.3  riastrad 
/*
 * Reader/writer spin lock: a kmutex plus a count of active readers.
 * Writers hold rw_lock for the whole critical section; readers take
 * rw_lock only briefly, to adjust rw_nreaders.
 */
typedef struct linux_rwlock {
	kmutex_t	rw_lock;	/* held by writer; guards rw_nreaders */
	unsigned	rw_nreaders;	/* number of active readers */
} rwlock_t;
    149   1.7  riastrad 
    150   1.7  riastrad static inline void
    151   1.7  riastrad rwlock_init(rwlock_t *rw)
    152   1.7  riastrad {
    153   1.7  riastrad 
    154   1.7  riastrad 	mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
    155   1.7  riastrad 	rw->rw_nreaders = 0;
    156   1.7  riastrad }
    157   1.7  riastrad 
/*
 * rwlock_destroy(rw)
 *
 *	Free resources associated with a reader/writer spin lock,
 *	which must have no active readers.
 */
static inline void
rwlock_destroy(rwlock_t *rw)
{

	KASSERTMSG(rw->rw_nreaders == 0,
	    "rwlock still held by %u readers", rw->rw_nreaders);
	mutex_destroy(&rw->rw_lock);
}
    166   1.7  riastrad 
    167   1.7  riastrad static inline void
    168   1.7  riastrad write_lock_irq(rwlock_t *rw)
    169   1.7  riastrad {
    170   1.7  riastrad 
    171   1.7  riastrad 	for (;;) {
    172   1.7  riastrad 		mutex_spin_enter(&rw->rw_lock);
    173   1.7  riastrad 		if (rw->rw_nreaders == 0)
    174   1.7  riastrad 			break;
    175   1.7  riastrad 		mutex_spin_exit(&rw->rw_lock);
    176   1.7  riastrad 	}
    177   1.7  riastrad }
    178   1.7  riastrad 
/*
 * write_unlock_irq(rw)
 *
 *	Release the lock previously acquired with write_lock_irq.
 */
static inline void
write_unlock_irq(rwlock_t *rw)
{

	/* No reader can have snuck in while we held rw_lock.  */
	KASSERT(rw->rw_nreaders == 0);
	mutex_spin_exit(&rw->rw_lock);
}
    186   1.7  riastrad 
/*
 * read_lock(rw)
 *
 *	Acquire the lock for reading.  Readers may overlap and may
 *	recurse; rw_lock is taken only briefly to bump the reader
 *	count, so a reader excludes writers but not other readers.
 */
static inline void
read_lock(rwlock_t *rw)
{

	mutex_spin_enter(&rw->rw_lock);
	KASSERT(rw->rw_nreaders < UINT_MAX);	/* guard counter overflow */
	rw->rw_nreaders++;
	mutex_spin_exit(&rw->rw_lock);
}
    196   1.7  riastrad 
/*
 * read_unlock(rw)
 *
 *	Release the lock previously acquired with read_lock, by
 *	dropping the reader count under rw_lock.
 */
static inline void
read_unlock(rwlock_t *rw)
{

	mutex_spin_enter(&rw->rw_lock);
	KASSERT(0 < rw->rw_nreaders);	/* guard counter underflow */
	rw->rw_nreaders--;
	mutex_spin_exit(&rw->rw_lock);
}
    206   1.3  riastrad 
/*
 * local_bh_disable()
 *
 *	On Linux, block softints ("bottom halves") on the current CPU.
 *	No-op in this implementation -- NOTE(review): presumably the
 *	spin locks' IPL already excludes the relevant softints;
 *	confirm for new callers.
 */
static inline void
local_bh_disable(void)
{
}
    211  1.12  riastrad 
/*
 * local_bh_enable()
 *
 *	Undo local_bh_disable.  No-op in this implementation.
 */
static inline void
local_bh_enable(void)
{
}
    216  1.12  riastrad 
/*
 * atomic_dec_and_lock_irqsave(A, L, F)
 *
 *	Macro wrapper so the caller's flags variable F can be passed
 *	by reference to _atomic_dec_and_lock_irqsave.
 */
#define	atomic_dec_and_lock_irqsave(A, L, F)				      \
	_atomic_dec_and_lock_irqsave(A, L, &(F))
    219  1.14  riastrad 
/*
 * _atomic_dec_and_lock_irqsave(atomic, lock, flagsp)
 *
 *	Decrement atomic, which must be nonzero on entry.  If the
 *	result is zero, return true with lock held (interrupt state
 *	saved through *flagsp); otherwise decrement without taking the
 *	lock and return false.  Thus anyone who observes the count
 *	drop to zero does so with the lock held.
 */
static inline bool __must_check
_atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
    unsigned long *flagsp)
{
	unsigned old, new;

	do {
		old = atomic_read(atomic);
		KASSERT(old);	/* count must not already be zero */
		if (old == 1) {
			/*
			 * Possibly the last reference.  Take the lock
			 * and do the final decrement under it.
			 */
			spin_lock_irqsave(lock, *flagsp);
			if (atomic_dec_return(atomic) == 0)
				return true;
			/*
			 * Count changed between the read and the
			 * decrement -- not actually last; back out.
			 */
			spin_unlock_irqrestore(lock, *flagsp);
			return false;
		}
		new = old - 1;
	} while (atomic_cmpxchg(atomic, old, new) != old);

	/* We decremented from >1, so the count is still nonzero.  */
	KASSERT(old != 1);
	KASSERT(new != 0);
	return false;
}
    243  1.14  riastrad 
    244   1.2  riastrad #endif  /* _LINUX_SPINLOCK_H_ */
    245