/*	$NetBSD: lock.h,v 1.16.12.3 2007/09/03 14:30:48 yamt Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
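
/*
 * Note: initialization is implemented as an unlock.  The kernel path
 * jumps to the Sunlock assembler stub; the userland path clears bit 0
 * of the lock word with bbcci.  Either way the lock is left in the
 * released state.
 */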

static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _KERNEL
	__asm volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	__asm volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc");
#endif

	return ret;
}
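
/*
 * Illustrative sketch (not compiled): __cpu_simple_lock_try() returns
 * nonzero if it acquired the lock and 0 if the lock was already held.
 * The function and lock names below are hypothetical.
 */
#if 0
static __cpu_simple_lock_t example_lock;	/* once, at setup:
						   __cpu_simple_lock_init() */

static void
example_poll(void)
{
	if (__cpu_simple_lock_try(&example_lock)) {
		/* ... critical section ... */
		__cpu_simple_unlock(&example_lock);
	}
	/* else: lock busy; do other work and retry later */
}
#endif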

#ifdef _KERNEL
#if defined(MULTIPROCESSOR)
#define VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
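/*
 * While spinning for the lock, notice and service pending console
 * character and DDB IPIs (the VAX_LOCK_CHECKS bits above), so that a
 * lock holder waiting on one of those requests cannot deadlock against
 * this CPU; see the SPINLOCK_SPIN_HOOK comment below.
 */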
#define __cpu_simple_lock(__alp)					\
do {									\
	struct cpu_info *__ci = curcpu();				\
									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		int __s;						\
									\
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {		\
			__s = splipi();					\
			cpu_handle_ipi();				\
			splx(__s);					\
		}							\
	}								\
} while (/*CONSTCOND*/0)
#else /* MULTIPROCESSOR */
#define __cpu_simple_lock(__alp)					\
do {									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		;							\
	}								\
} while (/*CONSTCOND*/0)
#endif
#else
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	__asm volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc");
}
#endif /* _KERNEL */

#if 0
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(__alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm volatile ("movl %0,%%r1;jsb Slock"
		: /* No output */
		: "g"(__alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*__alp));
#endif
}
#endif

static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
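
/*
 * Illustrative sketch (not compiled): the usual pairing of
 * __cpu_simple_lock()/__cpu_simple_unlock() around a critical section.
 * The lock and counter names are hypothetical.
 */
#if 0
static __cpu_simple_lock_t counter_lock;	/* set up once with
						   __cpu_simple_lock_init() */
static int counter;

static void
counter_bump(void)
{
	__cpu_simple_lock(&counter_lock);
	counter++;		/* protected by counter_lock */
	__cpu_simple_unlock(&counter_lock);
}
#endif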

#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower.  This can cause problems while waiting for r/w
 * spinlocks at a relatively high priority level: IPIs that come in
 * will not be processed, which can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		   __ci->ci_cpuid); */					\
		__s = splipi();						\
		cpu_handle_ipi();					\
		splx(__s);						\
	}								\
} while (/*CONSTCOND*/0)
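
/*
 * Illustrative sketch (not compiled): a generic spin-wait loop that
 * invokes the hook so pending IPIs are handled while it spins.  The
 * function is hypothetical, not the actual locking code.
 */
#if 0
static void
example_spin(__cpu_simple_lock_t *alp)
{
	while (__cpu_simple_lock_try(alp) == 0)
		SPINLOCK_SPIN_HOOK;
}
#endif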
#endif /* MULTIPROCESSOR */

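/*
 * Memory barriers: no-ops on this port.  No barrier instruction is
 * issued; the VAX memory model is assumed to be strongly ordered
 * enough that reads and writes are not reordered across these calls.
 */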
static __inline void mb_read(void);
static __inline void
mb_read(void)
{
}

static __inline void mb_write(void);
static __inline void
mb_write(void)
{
}
#endif /* _VAX_LOCK_H_ */