/*	$NetBSD: lock.h,v 1.32 2017/09/17 00:01:08 christos Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_LOCK_H
#define _MACHINE_LOCK_H

/*
 * Machine dependent spin lock operations.
 */

#if __SIMPLELOCK_UNLOCKED != 0
#error __SIMPLELOCK_UNLOCKED must be 0 for this implementation
#endif

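/*
 * The SPARC "ldstub" instruction atomically loads a byte and stores
 * 0xff into it.  Taking the lock therefore writes a non-zero value,
 * which is why __SIMPLELOCK_UNLOCKED must be 0: a return value of
 * __SIMPLELOCK_UNLOCKED from __ldstub() means the lock was free and
 * is now ours.
 */
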
/* XXX So we can expose this to userland. */
#ifdef __lint__
#define __ldstub(__addr)	(__addr)
#else /* !__lint__ */
static __inline int __ldstub(__cpu_simple_lock_t *addr);
static __inline int __ldstub(__cpu_simple_lock_t *addr)
{
	int v;

	__asm volatile("ldstub [%1],%0"
	    : "=&r" (v)
	    : "r" (addr)
	    : "memory");

	return v;
}
#endif /* __lint__ */

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#else
extern void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif
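/*
 * With __CPU_SIMPLE_LOCK_NOINLINE defined, __cpu_simple_lock() is
 * supplied out of line instead (presumably so a single copy of the
 * spin loop can be provided elsewhere).
 */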

static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	/*
	 * If someone else holds the lock, use simple reads until it
	 * is released, then retry the atomic operation.  This reduces
	 * memory bus contention because the cache-coherency logic
	 * does not have to broadcast invalidates on the lock while
	 * we spin on it.
	 */
	while (__ldstub(alp) != __SIMPLELOCK_UNLOCKED) {
		while (*alp != __SIMPLELOCK_UNLOCKED)
			/* spin */ ;
	}
}
#endif /* __CPU_SIMPLE_LOCK_NOINLINE */

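/*
 * Try once to take the lock.  Returns non-zero if the lock was free
 * and is now held by the caller, zero if it was already held.  Never
 * spins.
 */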
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__ldstub(alp) == __SIMPLELOCK_UNLOCKED);
}

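/*
 * Releasing the lock is a single byte store.  A compiler barrier is
 * enough here on the assumption that the CPU runs in the default TSO
 * (total store order) memory model, where earlier loads and stores
 * are not reordered past a later store.
 */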
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	/*
	 * Insert compiler barrier to prevent instruction re-ordering
	 * around the lock release.
	 */
	__insn_barrier();
	*alp = __SIMPLELOCK_UNLOCKED;
}

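/*
 * Usage sketch (illustrative only, not part of this header): guarding
 * a shared counter with a spin lock.
 *
 *	static __cpu_simple_lock_t lk;
 *	static int counter;
 *
 *	__cpu_simple_lock_init(&lk);	(once, before first use)
 *	...
 *	__cpu_simple_lock(&lk);		(spins until acquired)
 *	counter++;			(critical section)
 *	__cpu_simple_unlock(&lk);	(barrier, then release store)
 */
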
#if defined(__sparc_v9__)
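/*
 * Memory barriers.  On V9, "membar #LoadLoad" orders earlier loads
 * before later loads, and "membar #MemIssue" forces all outstanding
 * memory operations to complete before any later ones are issued.
 * mb_write() is only a compiler barrier, presumably because stores
 * already issue in order under the TSO model assumed above.
 */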
static __inline void
mb_read(void)
{
	__asm __volatile("membar #LoadLoad" : : : "memory");
}

static __inline void
mb_write(void)
{
	__asm __volatile("" : : : "memory");
}

static __inline void
mb_memory(void)
{
	__asm __volatile("membar #MemIssue" : : : "memory");
}
#else	/* __sparc_v9__ */
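/*
 * Pre-V9 SPARC has no membar instruction.  Here mb_read() issues a
 * dummy store of %g0 to a static variable (with a "memory" clobber)
 * to force a real memory access at the barrier point, mb_write() is
 * just a compiler barrier, and mb_memory() falls back on the same
 * dummy store.
 */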
static __inline void
mb_read(void)
{
	static volatile int junk;
	__asm volatile("st %%g0,[%0]"
	    :
	    : "r" (&junk)
	    : "memory");
}

static __inline void
mb_write(void)
{
	__insn_barrier();
}

static __inline void
mb_memory(void)
{
	mb_read();
}
#endif	/* __sparc_v9__ */

#endif /* _MACHINE_LOCK_H */