/*	$NetBSD: lock.h,v 1.34 2022/02/13 13:41:17 riastradh Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_LOCK_H
#define _MACHINE_LOCK_H

/*
 * Machine dependent spin lock operations.
 */

#if __SIMPLELOCK_UNLOCKED != 0
#error __SIMPLELOCK_UNLOCKED must be 0 for this implementation
#endif

/* XXX So we can expose this to userland. */
#ifdef __lint__
#define __ldstub(__addr)	(__addr)
#else /* !__lint__ */
static __inline int __ldstub(__cpu_simple_lock_t *addr);
static __inline int __ldstub(__cpu_simple_lock_t *addr)
{
	int v;

	__asm volatile("ldstub [%1],%0"
	    : "=&r" (v)
	    : "r" (addr)
	    : "memory");

	return v;
}
#endif /* __lint__ */
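
/*
 * LDSTUB atomically loads the byte at the given address and stores
 * all-ones (0xff) back to it in a single indivisible bus transaction,
 * returning the old value; a nonzero result means the lock was
 * already held.  A hedged, non-atomic C sketch of the semantics
 * (illustration only, not part of this header):
 *
 *	int
 *	ldstub_sketch(unsigned char *addr)
 *	{
 *		int old = *addr;	// load the old value ...
 *		*addr = 0xff;		// ... and store all-ones; the
 *					// hardware does both as one step
 *		return old;		// 0 iff the lock was free
 *	}
 */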

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#else
extern void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif

static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}
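
/*
 * The four accessors above are plain, non-atomic loads and stores;
 * they are meant for callers that already own the lock or otherwise
 * serialize access.  A hedged sketch of a typical diagnostic use
 * (hypothetical caller, not part of this header):
 *
 *	KASSERT(__SIMPLELOCK_LOCKED_P(&lk));	// we must hold lk here
 */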

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	/*
	 * If someone else holds the lock, use simple reads until it
	 * is released, then retry the atomic operation.  This reduces
	 * memory bus contention because the cache-coherency logic
	 * does not have to broadcast invalidates on the lock while
	 * we spin on it.
	 */
	while (__ldstub(alp) != __SIMPLELOCK_UNLOCKED) {
		while (*alp != __SIMPLELOCK_UNLOCKED)
			/* spin */ ;
	}

	/*
	 * No memory barrier needed here to make this a load-acquire
	 * operation because LDSTUB already implies that.  See SPARCv8
	 * Reference Manual, Appendix J.4 `Spin Locks', p. 271.
	 */
}
#endif /* __CPU_SIMPLE_LOCK_NOINLINE */

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	/*
	 * No memory barrier needed for LDSTUB to be a load-acquire --
	 * see __cpu_simple_lock.
	 */
	return (__ldstub(alp) == __SIMPLELOCK_UNLOCKED);
}
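
/*
 * A hedged sketch of the usual try-lock pattern (hypothetical
 * caller and fallback, not part of this header):
 *
 *	if (__cpu_simple_lock_try(&lk)) {
 *		// acquired the lock without spinning
 *		... critical section ...
 *		__cpu_simple_unlock(&lk);
 *	} else {
 *		... back off or defer the work ...
 *	}
 */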

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	/*
	 * Insert compiler barrier to prevent instruction re-ordering
	 * around the lock release.
	 *
	 * No memory barrier needed because we run the kernel in TSO.
	 * If we ran the kernel in PSO, this would require STBAR.
	 */
	__insn_barrier();
	*alp = __SIMPLELOCK_UNLOCKED;
}
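
/*
 * A minimal usage sketch of the interface as a whole (hypothetical
 * caller, not part of this header):
 *
 *	static __cpu_simple_lock_t lk;
 *
 *	__cpu_simple_lock_init(&lk);	// mark the lock unlocked
 *	__cpu_simple_lock(&lk);		// spin until we acquire it
 *	... critical section ...
 *	__cpu_simple_unlock(&lk);	// compiler barrier, then release
 */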

#endif /* _MACHINE_LOCK_H */