/*	$NetBSD: lock.h,v 1.19 2005/12/24 20:07:41 perry Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif

/*
 * Initialize the lock to the unlocked state.  The kernel path hands the
 * lock address to the Sunlock assembly stub in %r1; user space clears
 * bit 0 with BBCCI (branch on bit clear and clear interlocked).
 */
static inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
#ifdef _KERNEL
        __asm__ volatile ("movl %0,%%r1;jsb Sunlock"
                : /* No output */
                : "g"(alp)
                : "r1","cc","memory");
#else
        __asm__ volatile ("bbcci $0,%0,1f;1:"
                : /* No output */
                : "m"(*alp)
                : "cc");
#endif
}

/*
 * Try once to take the lock; returns non-zero on success, zero if the
 * lock was already held.  The kernel path calls the Slocktry assembly
 * stub (result in %r0); user space uses BBSSI (branch on bit set and
 * set interlocked), which fails without looping if bit 0 is already set.
 */
static inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
        int ret;

#ifdef _KERNEL
        __asm__ volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
                : "=&r"(ret)
                : "g"(alp)
                : "r0","r1","cc","memory");
#else
        __asm__ volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
                : "=&r"(ret)
                : "m"(*alp)
                : "cc");
#endif

        return ret;
}

#ifdef _KERNEL
#define VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
/*
 * Busy-wait for the lock, servicing pending console and DDB IPIs while
 * spinning so those requests are not held off by the busy-wait.
 */
#define __cpu_simple_lock(alp)                                          \
do {                                                                    \
        struct cpu_info *__ci = curcpu();                               \
                                                                        \
        while (__cpu_simple_lock_try(alp) == 0) {                       \
                int ___s;                                               \
                                                                        \
                if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {               \
                        ___s = splipi();                                \
                        cpu_handle_ipi();                               \
                        splx(___s);                                     \
                }                                                       \
        }                                                               \
} while (0)
#else
/*
 * User space: spin with BBSSI until bit 0 is found clear; the
 * instruction then sets it atomically (interlocked).
 */
static inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
        __asm__ volatile ("1:bbssi $0,%0,1b"
                : /* No outputs */
                : "m"(*alp)
                : "cc");
}
#endif /* _KERNEL */

#if 0
static inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
        struct cpu_info *ci = curcpu();

        while (__cpu_simple_lock_try(alp) == 0) {
                int s;

                if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
                        s = splipi();
                        cpu_handle_ipi();
                        splx(s);
                }
        }

#if 0
        __asm__ volatile ("movl %0,%%r1;jsb Slock"
                : /* No output */
                : "g"(alp)
                : "r0","r1","cc","memory");
#endif
#if 0
        __asm__ volatile ("1:;bbssi $0, %0, 1b"
                : /* No output */
                : "m"(*alp));
#endif
}
#endif

/*
 * Release the lock: the kernel path goes through the Sunlock assembly
 * stub; user space clears bit 0 with BBCCI.
 */
static inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
#ifdef _KERNEL
        __asm__ volatile ("movl %0,%%r1;jsb Sunlock"
                : /* No output */
                : "g"(alp)
                : "r1","cc","memory");
#else
        __asm__ volatile ("bbcci $0,%0,1f;1:"
                : /* No output */
                : "m"(*alp)
                : "cc");
#endif
}

#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower.  This can cause some problems while waiting for r/w
 * spinlocks from a high-ish priority level: IPIs that come in will not
 * be processed.  This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.  (A usage sketch follows the macro below.)
 */
#define SPINLOCK_SPIN_HOOK                                              \
do {                                                                    \
        struct cpu_info *__ci = curcpu();                               \
        int ___s;                                                       \
                                                                        \
        if (__ci->ci_ipimsgs != 0) {                                    \
                /* printf("CPU %lu has IPIs pending\n",                 \
                    __ci->ci_cpuid); */                                 \
                ___s = splipi();                                        \
                cpu_handle_ipi();                                       \
                splx(___s);                                             \
        }                                                               \
} while (0)
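
/*
 * Illustrative sketch only, not part of this interface: machine-
 * independent spin-wait loops are expected to invoke SPINLOCK_SPIN_HOOK
 * on each iteration so that pending IPIs are serviced while spinning.
 * The function and argument names below are hypothetical.
 */
#if 0
static inline void
example_spin_acquire(__cpu_simple_lock_t *alp)
{

        while (__cpu_simple_lock_try(alp) == 0) {
                /* Let pending IPIs through instead of spinning blind. */
                SPINLOCK_SPIN_HOOK;
        }
}
#endif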
#endif /* MULTIPROCESSOR */
#endif /* _VAX_LOCK_H_ */