/*	$NetBSD: spinlock.h,v 1.3.4.3 2017/12/03 11:37:59 jdolecek Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SPINLOCK_H_
#define _LINUX_SPINLOCK_H_

#include <sys/cdefs.h>
#include <sys/mutex.h>

#include <machine/limits.h>

#define	__acquires(lock)	/* XXX lockdep stuff */
#define	__releases(lock)	/* XXX lockdep stuff */

typedef struct spinlock {
	kmutex_t sl_lock;
} spinlock_t;

static inline int
spin_is_locked(spinlock_t *spinlock)
{
	return mutex_owned(&spinlock->sl_lock);
}

static inline void
spin_lock(spinlock_t *spinlock)
{
	mutex_enter(&spinlock->sl_lock);
}

static inline void
spin_unlock(spinlock_t *spinlock)
{
	mutex_exit(&spinlock->sl_lock);
}
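
/*
 * Minimal usage sketch (illustrative, not part of the original header;
 * "sc" and its sc_lock member are hypothetical).  In practice the
 * spinlock_t is embedded in some driver structure:
 *
 *	spin_lock(&sc->sc_lock);
 *	... touch state shared with other threads ...
 *	spin_unlock(&sc->sc_lock);
 */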

static inline void
spin_lock_irq(spinlock_t *spinlock)
{
	spin_lock(spinlock);
}

static inline void
spin_unlock_irq(spinlock_t *spinlock)
{
	spin_unlock(spinlock);
}

/* Must be a macro because the second argument is to be assigned.  */
#define	spin_lock_irqsave(SPINLOCK, FLAGS)				\
	do {								\
		(FLAGS) = 0;						\
		mutex_enter(&((spinlock_t *)(SPINLOCK))->sl_lock);	\
	} while (0)

static inline void
spin_unlock_irqrestore(spinlock_t *spinlock, unsigned long __unused flags)
{
	mutex_exit(&spinlock->sl_lock);
}
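
/*
 * Usage sketch for the irqsave variants (illustrative; "sc" and "flags"
 * are hypothetical locals).  Unlike Linux, FLAGS carries no saved
 * interrupt state here (the kmutex manages the IPL), but callers still
 * pass it through for API compatibility:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&sc->sc_lock, flags);
 *	... critical section ...
 *	spin_unlock_irqrestore(&sc->sc_lock, flags);
 */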

static inline void
spin_lock_init(spinlock_t *spinlock)
{
	/* XXX What's the right IPL?  IPL_DRM...?  */
	mutex_init(&spinlock->sl_lock, MUTEX_DEFAULT, IPL_VM);
}

/*
 * XXX Linux doesn't ever destroy spin locks, it seems.  We'll have to
 * kludge it up.
 */

static inline void
spin_lock_destroy(spinlock_t *spinlock)
{
	mutex_destroy(&spinlock->sl_lock);
}
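
/*
 * Lifecycle sketch (illustrative; "sc" and the attach/detach context
 * are hypothetical).  Unlike Linux, the backing kmutex must be torn
 * down, so ported code pairs spin_lock_init with spin_lock_destroy.
 *
 * At attach time:
 *
 *	spin_lock_init(&sc->sc_lock);
 *
 * At detach time, once no other thread can take the lock:
 *
 *	spin_lock_destroy(&sc->sc_lock);
 */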

/* This is a macro to make the panic message clearer.  */
#define	assert_spin_locked(spinlock)	\
	KASSERT(mutex_owned(&(spinlock)->sl_lock))
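
/*
 * Sketch of the intended use (illustrative; "foo_softc" and the helper
 * are hypothetical): a routine that requires its caller to hold the
 * lock states that precondition up front.
 *
 *	static void
 *	foo_update_locked(struct foo_softc *sc)
 *	{
 *		assert_spin_locked(&sc->sc_lock);
 *		... update state protected by sc_lock ...
 *	}
 */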

/*
 * Stupid reader/writer spin locks.  No attempt to avoid writer
 * starvation.  Must allow recursive readers.  We use mutex and state
 * instead of compare-and-swap for expedience and LOCKDEBUG support.
 */

typedef struct linux_rwlock {
	kmutex_t	rw_lock;
	unsigned	rw_nreaders;
} rwlock_t;

static inline void
rwlock_init(rwlock_t *rw)
{

	mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
	rw->rw_nreaders = 0;
}

static inline void
rwlock_destroy(rwlock_t *rw)
{

	KASSERTMSG(rw->rw_nreaders == 0,
	    "rwlock still held by %u readers", rw->rw_nreaders);
	mutex_destroy(&rw->rw_lock);
}

static inline void
write_lock_irq(rwlock_t *rw)
{

	/*
	 * Take the mutex and keep it only once there are no readers
	 * left; otherwise drop it and spin until the readers drain.
	 */
	for (;;) {
		mutex_spin_enter(&rw->rw_lock);
		if (rw->rw_nreaders == 0)
			break;
		mutex_spin_exit(&rw->rw_lock);
	}
}

static inline void
write_unlock_irq(rwlock_t *rw)
{

	KASSERT(rw->rw_nreaders == 0);
	mutex_spin_exit(&rw->rw_lock);
}

static inline void
read_lock(rwlock_t *rw)
{

	/* Take the mutex only long enough to count ourselves in.  */
	mutex_spin_enter(&rw->rw_lock);
	KASSERT(rw->rw_nreaders < UINT_MAX);
	rw->rw_nreaders++;
	mutex_spin_exit(&rw->rw_lock);
}

static inline void
read_unlock(rwlock_t *rw)
{

	/* Take the mutex only long enough to count ourselves out.  */
	mutex_spin_enter(&rw->rw_lock);
	KASSERT(0 < rw->rw_nreaders);
	rw->rw_nreaders--;
	mutex_spin_exit(&rw->rw_lock);
}
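
/*
 * Usage sketch for the rwlock shims (illustrative; "sc" and sc_rwlock
 * are hypothetical).  Readers may nest, and a reader does not hold the
 * underlying kmutex between read_lock and read_unlock:
 *
 *	rwlock_init(&sc->sc_rwlock);
 *
 *	read_lock(&sc->sc_rwlock);
 *	... read shared state ...
 *	read_unlock(&sc->sc_rwlock);
 *
 *	write_lock_irq(&sc->sc_rwlock);
 *	... modify shared state ...
 *	write_unlock_irq(&sc->sc_rwlock);
 */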

#endif  /* _LINUX_SPINLOCK_H_ */