Home | History | Annotate | Line # | Download | only in linux
spinlock.h revision 1.3.2.2
      1  1.3.2.2       snj /*	$NetBSD: spinlock.h,v 1.3.2.2 2016/04/15 08:46:42 snj Exp $	*/
      2      1.2  riastrad 
      3      1.2  riastrad /*-
      4      1.2  riastrad  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5      1.2  riastrad  * All rights reserved.
      6      1.2  riastrad  *
      7      1.2  riastrad  * This code is derived from software contributed to The NetBSD Foundation
      8      1.2  riastrad  * by Taylor R. Campbell.
      9      1.2  riastrad  *
     10      1.2  riastrad  * Redistribution and use in source and binary forms, with or without
     11      1.2  riastrad  * modification, are permitted provided that the following conditions
     12      1.2  riastrad  * are met:
     13      1.2  riastrad  * 1. Redistributions of source code must retain the above copyright
     14      1.2  riastrad  *    notice, this list of conditions and the following disclaimer.
     15      1.2  riastrad  * 2. Redistributions in binary form must reproduce the above copyright
     16      1.2  riastrad  *    notice, this list of conditions and the following disclaimer in the
     17      1.2  riastrad  *    documentation and/or other materials provided with the distribution.
     18      1.2  riastrad  *
     19      1.2  riastrad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20      1.2  riastrad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21      1.2  riastrad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22      1.2  riastrad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23      1.2  riastrad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24      1.2  riastrad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25      1.2  riastrad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26      1.2  riastrad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27      1.2  riastrad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28      1.2  riastrad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29      1.2  riastrad  * POSSIBILITY OF SUCH DAMAGE.
     30      1.2  riastrad  */
     31      1.2  riastrad 
     32      1.2  riastrad #ifndef _LINUX_SPINLOCK_H_
     33      1.2  riastrad #define _LINUX_SPINLOCK_H_
     34      1.2  riastrad 
     35      1.2  riastrad #include <sys/cdefs.h>
     36      1.2  riastrad #include <sys/mutex.h>
     37      1.2  riastrad 
     38  1.3.2.2       snj #include <machine/limits.h>
     39  1.3.2.2       snj 
     40  1.3.2.1       snj #define	__acquires(lock)	/* XXX lockdep stuff */
     41  1.3.2.1       snj #define	__releases(lock)	/* XXX lockdep stuff */
     42  1.3.2.1       snj 
/*
 * Linux spinlock_t, backed by a NetBSD kmutex.  Set up with
 * spin_lock_init and (as a local extension -- Linux never destroys
 * spin locks) torn down with spin_lock_destroy.
 */
typedef struct spinlock {
	kmutex_t sl_lock;	/* backing kmutex */
} spinlock_t;
     46      1.2  riastrad 
     47      1.2  riastrad static inline int
     48      1.2  riastrad spin_is_locked(spinlock_t *spinlock)
     49      1.2  riastrad {
     50      1.2  riastrad 	return mutex_owned(&spinlock->sl_lock);
     51      1.2  riastrad }
     52      1.2  riastrad 
     53      1.2  riastrad static inline void
     54      1.2  riastrad spin_lock(spinlock_t *spinlock)
     55      1.2  riastrad {
     56      1.2  riastrad 	mutex_enter(&spinlock->sl_lock);
     57      1.2  riastrad }
     58      1.2  riastrad 
     59      1.2  riastrad static inline void
     60      1.2  riastrad spin_unlock(spinlock_t *spinlock)
     61      1.2  riastrad {
     62      1.2  riastrad 	mutex_exit(&spinlock->sl_lock);
     63      1.2  riastrad }
     64      1.2  riastrad 
     65      1.2  riastrad static inline void
     66      1.2  riastrad spin_lock_irq(spinlock_t *spinlock)
     67      1.2  riastrad {
     68      1.2  riastrad 	spin_lock(spinlock);
     69      1.2  riastrad }
     70      1.2  riastrad 
     71      1.2  riastrad static inline void
     72      1.2  riastrad spin_unlock_irq(spinlock_t *spinlock)
     73      1.2  riastrad {
     74      1.2  riastrad 	spin_unlock(spinlock);
     75      1.2  riastrad }
     76      1.2  riastrad 
/*
 * spin_lock_irqsave(SPINLOCK, FLAGS)
 *
 *	Acquire SPINLOCK and store a (vacuous) interrupt-state cookie
 *	in FLAGS.  Must be a macro because FLAGS is assigned to; the
 *	cookie is always zero since the kmutex manages the IPL itself.
 */
#define	spin_lock_irqsave(SPINLOCK, FLAGS)				\
	do {								\
		(FLAGS) = 0;						\
		spin_lock((spinlock_t *)(SPINLOCK));			\
	} while (0)
     83      1.2  riastrad 
     84      1.2  riastrad static inline void
     85      1.2  riastrad spin_unlock_irqrestore(spinlock_t *spinlock, unsigned long __unused flags)
     86      1.2  riastrad {
     87      1.2  riastrad 	mutex_exit(&spinlock->sl_lock);
     88      1.2  riastrad }
     89      1.2  riastrad 
     90      1.2  riastrad static inline void
     91      1.2  riastrad spin_lock_init(spinlock_t *spinlock)
     92      1.2  riastrad {
     93      1.2  riastrad 	/* XXX What's the right IPL?  IPL_DRM...?  */
     94      1.2  riastrad 	mutex_init(&spinlock->sl_lock, MUTEX_DEFAULT, IPL_VM);
     95      1.2  riastrad }
     96      1.2  riastrad 
     97      1.2  riastrad /*
     98      1.2  riastrad  * XXX Linux doesn't ever destroy spin locks, it seems.  We'll have to
     99      1.2  riastrad  * kludge it up.
    100      1.2  riastrad  */
    101      1.2  riastrad 
    102      1.2  riastrad static inline void
    103      1.2  riastrad spin_lock_destroy(spinlock_t *spinlock)
    104      1.2  riastrad {
    105      1.2  riastrad 	mutex_destroy(&spinlock->sl_lock);
    106      1.2  riastrad }
    107      1.2  riastrad 
/* This is a macro to make the panic message clearer.  */
/* Asserts the caller holds spinlock, via mutex_owned on the kmutex. */
#define	assert_spin_locked(spinlock)	\
	KASSERT(mutex_owned(&(spinlock)->sl_lock))
    111      1.2  riastrad 
    112      1.3  riastrad /*
    113  1.3.2.2       snj  * Stupid reader/writer spin locks.  No attempt to avoid writer
    114  1.3.2.2       snj  * starvation.  Must allow recursive readers.  We use mutex and state
    115  1.3.2.2       snj  * instead of compare-and-swap for expedience and LOCKDEBUG support.
    116      1.3  riastrad  */
    117      1.3  riastrad 
/*
 * Linux rwlock_t: a spin mutex plus a count of active readers.
 * Readers take rw_lock only momentarily to adjust rw_nreaders;
 * a writer holds rw_lock for its whole critical section and may
 * proceed only once rw_nreaders drops to zero.
 */
typedef struct linux_rwlock {
	kmutex_t	rw_lock;	/* writer lock / reader-count guard */
	unsigned	rw_nreaders;	/* number of active readers */
} rwlock_t;
    122  1.3.2.2       snj 
    123  1.3.2.2       snj static inline void
    124  1.3.2.2       snj rwlock_init(rwlock_t *rw)
    125  1.3.2.2       snj {
    126  1.3.2.2       snj 
    127  1.3.2.2       snj 	mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
    128  1.3.2.2       snj 	rw->rw_nreaders = 0;
    129  1.3.2.2       snj }
    130  1.3.2.2       snj 
    131  1.3.2.2       snj static inline void
    132  1.3.2.2       snj rwlock_destroy(rwlock_t *rw)
    133  1.3.2.2       snj {
    134  1.3.2.2       snj 
    135  1.3.2.2       snj 	KASSERTMSG(rw->rw_nreaders == 0,
    136  1.3.2.2       snj 	    "rwlock still held by %u readers", rw->rw_nreaders);
    137  1.3.2.2       snj 	mutex_destroy(&rw->rw_lock);
    138  1.3.2.2       snj }
    139  1.3.2.2       snj 
    140  1.3.2.2       snj static inline void
    141  1.3.2.2       snj write_lock_irq(rwlock_t *rw)
    142  1.3.2.2       snj {
    143  1.3.2.2       snj 
    144  1.3.2.2       snj 	for (;;) {
    145  1.3.2.2       snj 		mutex_spin_enter(&rw->rw_lock);
    146  1.3.2.2       snj 		if (rw->rw_nreaders == 0)
    147  1.3.2.2       snj 			break;
    148  1.3.2.2       snj 		mutex_spin_exit(&rw->rw_lock);
    149  1.3.2.2       snj 	}
    150  1.3.2.2       snj }
    151  1.3.2.2       snj 
    152  1.3.2.2       snj static inline void
    153  1.3.2.2       snj write_unlock_irq(rwlock_t *rw)
    154  1.3.2.2       snj {
    155  1.3.2.2       snj 
    156  1.3.2.2       snj 	KASSERT(rw->rw_nreaders == 0);
    157  1.3.2.2       snj 	mutex_spin_exit(&rw->rw_lock);
    158  1.3.2.2       snj }
    159  1.3.2.2       snj 
    160  1.3.2.2       snj static inline void
    161  1.3.2.2       snj read_lock(rwlock_t *rw)
    162  1.3.2.2       snj {
    163  1.3.2.2       snj 
    164  1.3.2.2       snj 	mutex_spin_enter(&rw->rw_lock);
    165  1.3.2.2       snj 	KASSERT(rw->rw_nreaders < UINT_MAX);
    166  1.3.2.2       snj 	rw->rw_nreaders++;
    167  1.3.2.2       snj 	mutex_spin_exit(&rw->rw_lock);
    168  1.3.2.2       snj }
    169  1.3.2.2       snj 
    170  1.3.2.2       snj static inline void
    171  1.3.2.2       snj read_unlock(rwlock_t *rw)
    172  1.3.2.2       snj {
    173  1.3.2.2       snj 
    174  1.3.2.2       snj 	mutex_spin_enter(&rw->rw_lock);
    175  1.3.2.2       snj 	KASSERT(0 < rw->rw_nreaders);
    176  1.3.2.2       snj 	rw->rw_nreaders--;
    177  1.3.2.2       snj 	mutex_spin_exit(&rw->rw_lock);
    178  1.3.2.2       snj }
    179      1.3  riastrad 
    180      1.2  riastrad #endif  /* _LINUX_SPINLOCK_H_ */
    181