/*	$NetBSD: lock.h,v 1.26 2007/09/10 11:34:10 skrll Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

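/*
 * Illustrative sketch (not compiled, hence the #if 0): the predicates
 * above permit a "test and test-and-set" style spin that reads the lock
 * word with plain loads until it looks free, and only then attempts the
 * interlocked acquire defined below.  The function name is hypothetical.
 */
#if 0
static void
example_polite_spin(__cpu_simple_lock_t *lp)
{
	do {
		while (__SIMPLELOCK_LOCKED_P(lp))
			continue;	/* spin on a plain read */
	} while (__cpu_simple_lock_try(lp) == 0);
}
#endif
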
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
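
/*
 * The lock, try-lock and unlock paths here all build on the VAX
 * interlocked branch-on-bit instructions: BBSSI (branch on bit set and
 * set, interlocked) and BBCCI (branch on bit clear and clear,
 * interlocked).  A rough C equivalent, assuming GCC-style __atomic
 * builtins and given purely for illustration:
 */
#if 0
static int
example_bbssi(volatile unsigned long *p)
{
	/* Set bit 0 atomically; "branch taken" when it was already set. */
	return (__atomic_fetch_or(p, 1UL, __ATOMIC_SEQ_CST) & 1UL) != 0;
}

static void
example_bbcci(volatile unsigned long *p)
{
	/* Clear bit 0 atomically. */
	(void)__atomic_fetch_and(p, ~1UL, __ATOMIC_SEQ_CST);
}
#endif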

static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _KERNEL
	__asm volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	__asm volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc");
#endif

	return ret;
}
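
/*
 * Illustrative only: __cpu_simple_lock_try() returns nonzero on
 * success, so a caller that cannot afford to spin might use it as
 * below (hypothetical names, not compiled):
 */
#if 0
static int
example_try_enter(__cpu_simple_lock_t *lp)
{
	if (__cpu_simple_lock_try(lp) == 0)
		return 0;		/* busy; caller backs off */
	/* ... critical section ... */
	__cpu_simple_unlock(lp);
	return 1;
}
#endif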

#ifdef _KERNEL
#if defined(MULTIPROCESSOR)
/*
 * IPIs that are still serviced while spinning: console character
 * output and DDB entry, so those requests from another CPU cannot
 * deadlock against a held lock.
 */
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
#define	__cpu_simple_lock(__alp)					\
do {									\
	struct cpu_info *__ci = curcpu();				\
									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		int __s;						\
									\
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {		\
			__s = splipi();					\
			cpu_handle_ipi();				\
			splx(__s);					\
		}							\
	}								\
} while (/*CONSTCOND*/0)
#else /* MULTIPROCESSOR */
#define	__cpu_simple_lock(__alp)					\
do {									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		;							\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */
#else /* !_KERNEL */
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	__asm volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc");
}
#endif /* _KERNEL */

#if 0
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(__alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm volatile ("movl %0,%%r1;jsb Slock"
		: /* No output */
		: "g"(__alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*__alp));
#endif
}
#endif

static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
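
/*
 * Illustrative only (not compiled): the expected pairing of
 * __cpu_simple_lock() and __cpu_simple_unlock() around a short
 * critical section.  The lock variable and function are hypothetical.
 */
#if 0
static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;

static void
example_critical_section(void)
{
	__cpu_simple_lock(&example_lock);
	/* ... data shared between CPUs is touched only here ... */
	__cpu_simple_unlock(&example_lock);
}
#endif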

#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower.  This can cause problems while waiting for r/w
 * spinlocks from a high-ish priority level: IPIs that come in will
 * not be processed, which can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		   __ci->ci_cpuid); */					\
		__s = splipi();						\
		cpu_handle_ipi();					\
		splx(__s);						\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */
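
/*
 * Illustrative only: a spin loop would invoke SPINLOCK_SPIN_HOOK on
 * each iteration so pending IPIs are drained while waiting.  A
 * hypothetical, not-compiled sketch:
 */
#if 0
static void
example_spin_wait(__cpu_simple_lock_t *lp)
{
	while (__cpu_simple_lock_try(lp) == 0) {
#ifdef SPINLOCK_SPIN_HOOK
		SPINLOCK_SPIN_HOOK;
#endif
	}
}
#endif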

/*
 * mb_read()/mb_write() are memory barriers.  They are empty here,
 * presumably because the interlocked instructions used above already
 * provide the required ordering on the VAX.
 */
static __inline void mb_read(void);
static __inline void
mb_read(void)
{
}

static __inline void mb_write(void);
static __inline void
mb_write(void)
{
}
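
/*
 * Illustrative only (hypothetical names, not compiled): a writer would
 * call mb_write() after filling in shared data and before setting a
 * "ready" flag; a reader would call mb_read() after seeing the flag
 * and before touching the data.
 */
#if 0
volatile int example_data, example_ready;

static void
example_publish(int v)
{
	example_data = v;
	mb_write();	/* order the data write before the flag write */
	example_ready = 1;
}

static int
example_consume(void)
{
	while (example_ready == 0)
		continue;
	mb_read();	/* order the flag read before the data read */
	return example_data;
}
#endif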
227 1.1 ragge #endif /* _VAX_LOCK_H_ */