/*	$NetBSD: spinlock.h,v 1.13 2021/12/19 11:47:55 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SPINLOCK_H_
#define _LINUX_SPINLOCK_H_

#include <sys/cdefs.h>
#include <sys/mutex.h>

#include <machine/limits.h>

#include <linux/irqflags.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>

typedef struct spinlock {
	kmutex_t sl_lock;
} spinlock_t;

static inline int
spin_is_locked(spinlock_t *spinlock)
{
	return mutex_owned(&spinlock->sl_lock);
}

static inline void
spin_lock(spinlock_t *spinlock)
{
	mutex_enter(&spinlock->sl_lock);
}

static inline void
spin_unlock(spinlock_t *spinlock)
{
	mutex_exit(&spinlock->sl_lock);
}

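/*
 * The underlying kmutex is initialized as a spin mutex at IPL_VM (see
 * spin_lock_init below), so taking it already blocks the interrupts of
 * interest; the _irq and _irqsave variants therefore add nothing over
 * the plain lock and unlock.
 */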
static inline void
spin_lock_irq(spinlock_t *spinlock)
{
	spin_lock(spinlock);
}

static inline void
spin_unlock_irq(spinlock_t *spinlock)
{
	spin_unlock(spinlock);
}

/* Must be a macro because the second argument is to be assigned.  */
#define	spin_lock_irqsave(SPINLOCK, FLAGS)				\
	do {								\
		(FLAGS) = 0;						\
		mutex_enter(&((spinlock_t *)(SPINLOCK))->sl_lock);	\
	} while (0)

#define	spin_trylock_irqsave(SPINLOCK, FLAGS)				\
		( (FLAGS) = 0,						\
		mutex_tryenter(&((spinlock_t *)(SPINLOCK))->sl_lock) )

static inline void
spin_unlock_irqrestore(spinlock_t *spinlock, unsigned long __unused flags)
{
	mutex_exit(&spinlock->sl_lock);
}
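
/*
 * Example usage (illustrative sketch only; struct example_softc and
 * example_bump are hypothetical, not part of this header):
 */
#if 0
struct example_softc {
	spinlock_t	sc_lock;	/* set up with spin_lock_init */
	unsigned	sc_count;	/* protected by sc_lock */
};

static inline unsigned
example_bump(struct example_softc *sc)
{
	unsigned long flags;
	unsigned v;

	/*
	 * flags is always zero in this shim; it exists only to satisfy
	 * the Linux API, since the kmutex manages the interrupt
	 * priority level itself.
	 */
	spin_lock_irqsave(&sc->sc_lock, flags);
	v = ++sc->sc_count;
	spin_unlock_irqrestore(&sc->sc_lock, flags);

	return v;
}
#endif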

/*
 * Lock classes are not tracked in this shim, so the _nested variants
 * ignore the subclass and just take the lock like their plain
 * counterparts.
 */
static inline void
spin_lock_nested(spinlock_t *spinlock, int subclass)
{
	spin_lock(spinlock);
}

#define	spin_lock_irqsave_nested(SPINLOCK, FLAGS, SUBCLASS)		      \
	spin_lock_irqsave(SPINLOCK, FLAGS)

static inline void
spin_lock_init(spinlock_t *spinlock)
{
	/* XXX What's the right IPL?  IPL_DRM...?  */
	mutex_init(&spinlock->sl_lock, MUTEX_DEFAULT, IPL_VM);
}

/*
 * XXX Linux doesn't ever seem to destroy spin locks, so we have to
 * kludge up a destroy routine of our own to pair with spin_lock_init.
 */

static inline void
spin_lock_destroy(spinlock_t *spinlock)
{
	mutex_destroy(&spinlock->sl_lock);
}
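
/*
 * Example of the full lock life cycle (illustrative sketch only,
 * reusing the hypothetical struct example_softc from the sketch
 * above):
 */
#if 0
static inline void
example_attach(struct example_softc *sc)
{
	spin_lock_init(&sc->sc_lock);
}

static inline void
example_poke(struct example_softc *sc)
{
	spin_lock(&sc->sc_lock);
	sc->sc_count++;
	spin_unlock(&sc->sc_lock);
}

static inline void
example_detach(struct example_softc *sc)
{
	/* Unlike Linux, we must destroy the lock when done with it.  */
	spin_lock_destroy(&sc->sc_lock);
}
#endif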

/* This is a macro to make the panic message clearer.  */
#define	assert_spin_locked(spinlock)	\
	KASSERT(mutex_owned(&(spinlock)->sl_lock))

/*
 * Stupid reader/writer spin locks: no attempt is made to avoid writer
 * starvation, and recursive readers must be allowed.  We use a mutex
 * and a reader count instead of compare-and-swap, for expedience and
 * for LOCKDEBUG support.
 */

typedef struct linux_rwlock {
	kmutex_t	rw_lock;
	unsigned	rw_nreaders;
} rwlock_t;

static inline void
rwlock_init(rwlock_t *rw)
{

	mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
	rw->rw_nreaders = 0;
}

static inline void
rwlock_destroy(rwlock_t *rw)
{

	KASSERTMSG(rw->rw_nreaders == 0,
	    "rwlock still held by %u readers", rw->rw_nreaders);
	mutex_destroy(&rw->rw_lock);
}

static inline void
write_lock_irq(rwlock_t *rw)
{

	for (;;) {
		mutex_spin_enter(&rw->rw_lock);
		if (rw->rw_nreaders == 0)
			break;
		mutex_spin_exit(&rw->rw_lock);
	}
}

static inline void
write_unlock_irq(rwlock_t *rw)
{

	KASSERT(rw->rw_nreaders == 0);
	mutex_spin_exit(&rw->rw_lock);
}

static inline void
read_lock(rwlock_t *rw)
{

	mutex_spin_enter(&rw->rw_lock);
	KASSERT(rw->rw_nreaders < UINT_MAX);
	rw->rw_nreaders++;
	mutex_spin_exit(&rw->rw_lock);
}

static inline void
read_unlock(rwlock_t *rw)
{

	mutex_spin_enter(&rw->rw_lock);
	KASSERT(0 < rw->rw_nreaders);
	rw->rw_nreaders--;
	mutex_spin_exit(&rw->rw_lock);
}
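
/*
 * Example usage (illustrative sketch only; struct example_table and its
 * users are hypothetical, and t->lock is assumed to have been set up
 * with rwlock_init):
 */
#if 0
struct example_table {
	rwlock_t	lock;
	int		value;	/* protected by lock */
};

static inline int
example_table_read(struct example_table *t)
{
	int v;

	read_lock(&t->lock);	/* any number of concurrent readers */
	v = t->value;
	read_unlock(&t->lock);

	return v;
}

static inline void
example_table_write(struct example_table *t, int v)
{
	write_lock_irq(&t->lock);	/* spins until all readers are done */
	t->value = v;
	write_unlock_irq(&t->lock);
}
#endif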

static inline void
local_bh_disable(void)
{
}

static inline void
local_bh_enable(void)
{
}

#endif  /* _LINUX_SPINLOCK_H_ */