/*	$NetBSD: lock.h,v 1.34 2022/02/13 13:41:17 riastradh Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_LOCK_H
#define _MACHINE_LOCK_H

/*
 * Machine dependent spin lock operations.
 */

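/*
 * The lock word is manipulated with LDSTUB, which always stores 0xff;
 * the code below represents the unlocked state as zero and checks that
 * assumption at compile time.
 */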
#if __SIMPLELOCK_UNLOCKED != 0
#error __SIMPLELOCK_UNLOCKED must be 0 for this implementation
#endif

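/*
 * __ldstub(addr) wraps the SPARC LDSTUB instruction: it atomically
 * reads the byte at addr and stores 0xff into it, returning the
 * previous value.  A return of __SIMPLELOCK_UNLOCKED therefore means
 * the caller has just acquired the lock.
 */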
/* XXX So we can expose this to userland. */
#ifdef __lint__
#define __ldstub(__addr)	(__addr)
#else /* !__lint__ */
static __inline int __ldstub(__cpu_simple_lock_t *addr);
static __inline int __ldstub(__cpu_simple_lock_t *addr)
{
	int v;

	__asm volatile("ldstub [%1],%0"
	    : "=&r" (v)
	    : "r" (addr)
	    : "memory");

	return v;
}
#endif /* __lint__ */

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#else
extern void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif

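/*
 * Lock-state predicates.  These are plain loads and provide no memory
 * ordering guarantees; they only report the value observed.
 */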
static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

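/*
 * Plain, non-atomic stores of the unlocked/locked values; no barriers
 * are implied.
 */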
static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	/*
	 * If someone else holds the lock, use simple reads until it
	 * is released, then retry the atomic operation. This reduces
	 * memory bus contention because the cache-coherency logic
	 * does not have to broadcast invalidates on the lock while
	 * we spin on it.
	 */
	while (__ldstub(alp) != __SIMPLELOCK_UNLOCKED) {
		while (*alp != __SIMPLELOCK_UNLOCKED)
			/* spin */ ;
	}

	/*
	 * No memory barrier needed here to make this a load-acquire
	 * operation because LDSTUB already implies that.  See SPARCv8
	 * Reference Manual, Appendix J.4 `Spin Locks', p. 271.
	 */
}
#endif /* __CPU_SIMPLE_LOCK_NOINLINE */

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	/*
	 * No memory barrier needed for LDSTUB to be a load-acquire --
	 * see __cpu_simple_lock.
	 */
	return (__ldstub(alp) == __SIMPLELOCK_UNLOCKED);
}

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	/*
	 * Insert a compiler barrier to prevent instruction re-ordering
	 * around the lock release.
	 *
	 * No memory barrier needed because we run the kernel in TSO.
	 * If we ran the kernel in PSO, this would require STBAR.
	 */
	__insn_barrier();
	*alp = __SIMPLELOCK_UNLOCKED;
}
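
/*
 * Typical usage (illustrative sketch):
 *
 *	__cpu_simple_lock_t lock;
 *
 *	__cpu_simple_lock_init(&lock);
 *	__cpu_simple_lock(&lock);
 *	... critical section ...
 *	__cpu_simple_unlock(&lock);
 *
 * __cpu_simple_lock_try() may be used instead of __cpu_simple_lock()
 * when the caller would rather do other work than spin; it returns
 * nonzero if the lock was acquired.
 */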

#endif /* _MACHINE_LOCK_H */