/*	$NetBSD: lock.h,v 1.24 2007/02/26 01:33:41 christos Exp $	*/
2
3 /*
4 * Copyright (c) 2000 Ludd, University of Lule}, Sweden.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed at Ludd, University of Lule}.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #ifndef _VAX_LOCK_H_
34 #define _VAX_LOCK_H_
35
36 #ifdef _KERNEL
37 #ifdef _KERNEL_OPT
38 #include "opt_multiprocessor.h"
39 #include <machine/intr.h>
40 #endif
41 #include <machine/cpu.h>
42 #endif
43
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);

/*
 * Initialize a simple lock to the unlocked state.
 */
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	/*
	 * Kernel: pass the lock address to the Sunlock assembly stub in
	 * %r1.  Releasing the lock and initializing it are the same
	 * operation, hence the shared stub with __cpu_simple_unlock().
	 */
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	/*
	 * Userland: BBCCI atomically clears bit 0 (the lock bit).  The
	 * branch target is the very next instruction, so control falls
	 * through whether the bit was set or not.
	 */
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
60
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);

/*
 * Try once to acquire the lock without spinning.
 * Returns non-zero on success, 0 if the lock was already held.
 */
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _KERNEL
	/*
	 * Kernel: Slocktry takes the lock address in %r1 and leaves its
	 * success flag in %r0, which we copy into ret.
	 */
	__asm volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	/*
	 * Userland: BBSSI atomically tests and sets bit 0.  If the bit
	 * was already set (lock held) it branches past the incl, leaving
	 * ret at 0; otherwise we acquired the lock and ret becomes 1.
	 */
	__asm volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc");
#endif

	return ret;
}
81
#ifdef _KERNEL
/* IPI types that must still be serviced while we spin for a lock. */
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))

/*
 * Kernel: spin until the lock is acquired.  While spinning, service
 * pending console/DDB IPIs, since the lock holder may be another CPU
 * that is waiting for this CPU to respond — ignoring them could
 * deadlock.  A macro (not an inline) so it can use curcpu()/splipi()
 * without this header depending on their declarations.
 */
#define	__cpu_simple_lock(__alp)					\
do {									\
	struct cpu_info *__ci = curcpu();				\
									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		int __s;						\
									\
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {		\
			__s = splipi();					\
			cpu_handle_ipi();				\
			splx(__s);					\
		}							\
	}								\
} while (/*CONSTCOND*/0)
#else
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);

/*
 * Userland: spin until the lock is acquired.  BBSSI atomically
 * tests and sets bit 0; the branch loops back to retry as long as
 * the bit was already set.
 */
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	__asm volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc");
}
#endif /* _KERNEL */
109
/*
 * A disabled (#if 0) duplicate implementation of __cpu_simple_lock(),
 * itself containing two further #if 0 assembly variants, used to live
 * here.  It was dead code shadowing the live definitions above and has
 * been removed.
 */
140
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);

/*
 * Release the lock.  Uses the same code paths as
 * __cpu_simple_lock_init(): unlocking and initializing both clear the
 * lock bit.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	/* Kernel: Sunlock assembly stub, lock address in %r1. */
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	/* Userland: BBCCI atomically clears bit 0; falls through either way. */
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
157
#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower.  This can cause some problems while waiting for r/w
 * spinlocks from a high'ish priority level: IPIs that come in will not
 * be processed.  This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.  Unlike __cpu_simple_lock() above, it services ALL
 * pending IPI messages, not just the console/DDB subset.
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		__s = splipi();						\
		cpu_handle_ipi();					\
		splx(__s);						\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */
182
static __inline void mb_read(void);

/*
 * Read memory barrier — intentionally empty on this port.
 * NOTE(review): this provides no compiler barrier either; presumably
 * callers do not rely on one here — confirm against the MI users of
 * mb_read() before changing.
 */
static __inline void
mb_read(void)
{
}
188
static __inline void mb_write(void);

/*
 * Write memory barrier — intentionally empty on this port.
 * NOTE(review): no compiler barrier is emitted either; verify the MI
 * callers of mb_write() tolerate that before changing.
 */
static __inline void
mb_write(void)
{
}
194 #endif /* _VAX_LOCK_H_ */
195