/*	$NetBSD: lock.h,v 1.24 2022/02/13 14:06:51 riastradh Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _HPPA_LOCK_H_
#define	_HPPA_LOCK_H_

#include <sys/stdint.h>

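/*
 * PA-RISC's atomic load-and-clear-word instruction (LDCW) requires its
 * operand to be aligned to a 16-byte boundary.  __cpu_simple_lock_t
 * therefore provides more storage than a single word, and
 * __SIMPLELOCK_ALIGN() rounds a pointer into it up to the next 16-byte
 * boundary to obtain the word that LDCW actually operates on.
 */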
#define	HPPA_LDCW_ALIGN		16UL

#define	__SIMPLELOCK_ALIGN(p)	\
    (volatile unsigned long *)(((uintptr_t)(p) + HPPA_LDCW_ALIGN - 1) & \
	~(HPPA_LDCW_ALIGN - 1))

#define	__SIMPLELOCK_RAW_LOCKED		0UL
#define	__SIMPLELOCK_RAW_UNLOCKED	1UL

static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_UNLOCKED;
}

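/*
 * LDCW atomically loads the word at the (16-byte aligned) address and
 * clears it to zero.  A return value of __SIMPLELOCK_RAW_LOCKED (0)
 * means the lock was already held; any other value means the caller
 * has just acquired it.
 */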
static __inline int
__ldcw(volatile unsigned long *__ptr)
{
	int __val;

	__asm volatile("ldcw 0(%1), %0"
	    : "=r" (__val) : "r" (__ptr)
	    : "memory");

	return __val;
}

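/*
 * The SYNC instruction enforces ordering of memory references: accesses
 * issued before it complete before any issued after it.  It is used
 * below to build acquire/release semantics around LDCW and the
 * unlocking store.
 */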
static __inline void
__sync(void)
{

	__asm volatile("sync\n"
	    : /* no outputs */
	    : /* no inputs */
	    : "memory");
}

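/*
 * All four words are initialized because, depending on where the lock
 * structure happens to land in memory, any one of them may be the
 * 16-byte aligned word that __SIMPLELOCK_ALIGN() selects.
 */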
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	alp->csl_lock[0] = alp->csl_lock[1] =
	    alp->csl_lock[2] = alp->csl_lock[3] =
	    __SIMPLELOCK_RAW_UNLOCKED;
}

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	if (__ldcw(__aptr) == __SIMPLELOCK_RAW_LOCKED)
		return 0;

	/*
	 * __cpu_simple_lock_try must be a load-acquire operation, but
	 * HPPA's LDCW does not appear to guarantee load-acquire
	 * semantics, so we have to do LDCW and then an explicit SYNC
	 * to make a load-acquire operation that pairs with a preceding
	 * store-release in __cpu_simple_unlock.
	 */
	__sync();
	return 1;
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	/*
	 * Note, if we detect that the lock is held when
	 * we do the initial load-clear-word, we spin using
	 * a non-locked load to save the coherency logic
	 * some work.
	 */

	while (!__cpu_simple_lock_try(alp))
		while (*__aptr == __SIMPLELOCK_RAW_LOCKED)
			;
}

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	/*
	 * SYNC and then store makes a store-release that pairs with
	 * the load-acquire in a subsequent __cpu_simple_lock_try.
	 */
	__sync();
	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}

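/*
 * The following set/clear operations store the lock word directly,
 * without the LDCW protocol or any memory barrier; callers are
 * expected to provide whatever ordering they need.
 */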
static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	*__aptr = __SIMPLELOCK_RAW_LOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}
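
#if 0	/* Illustrative usage only; not part of the original header. */
/*
 * A minimal sketch, assuming a statically allocated lock, of how
 * machine-independent code typically drives this interface.  The
 * example_* names below are hypothetical.
 */
static __cpu_simple_lock_t example_lock;	/* hypothetical lock */

static void
example_init(void)
{

	__cpu_simple_lock_init(&example_lock);
}

static void
example_critical_section(void)
{

	__cpu_simple_lock(&example_lock);
	/* ... work that must not run concurrently on another CPU ... */
	__cpu_simple_unlock(&example_lock);

	/* Non-blocking attempt: nonzero return means the lock was taken. */
	if (__cpu_simple_lock_try(&example_lock)) {
		/* ... */
		__cpu_simple_unlock(&example_lock);
	}
}
#endif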

#endif /* _HPPA_LOCK_H_ */