/*	$NetBSD: lock.h,v 1.5.4.1 2001/06/21 19:38:16 nathanw Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

typedef __volatile int	__cpu_simple_lock_t;

#define __SIMPLELOCK_LOCKED	1
#define __SIMPLELOCK_UNLOCKED	0

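/*
 * Simple spin lock primitives for the VAX port.  The lock word is a
 * single int: __SIMPLELOCK_UNLOCKED (0) when free, __SIMPLELOCK_LOCKED
 * (1) when held.  A minimal caller-side sketch (illustrative, not part
 * of this header):
 *
 *	__cpu_simple_lock_t lock;
 *
 *	__cpu_simple_lock_init(&lock);
 *	__cpu_simple_lock(&lock);
 *	... critical section ...
 *	__cpu_simple_unlock(&lock);
 *
 * __cpu_simple_lock_init() below initializes the lock to the unlocked
 * state by way of the same Sunlock assembler stub that
 * __cpu_simple_unlock() uses.
 */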
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	__asm__ __volatile ("movl %0,r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#if 0
	__asm__ __volatile ("bbcci $0, %0, 1f;1:"
		: /* No output */
		: "m"(*alp));
#endif
}

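/*
 * Try once to take the lock without spinning.  The lock address is
 * passed to the Slocktry assembler stub in r1 and the stub's result is
 * returned from r0: non-zero if the lock was acquired, 0 if it was
 * already held (compare the disabled bbssi variant below).
 */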
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int ret;

	__asm__ __volatile ("movl %1,r1;jsb Slocktry;movl r0,%0"
		: "=&r"(ret)
		: "g"(alp)
		: "r0","r1","cc","memory");
#if 0
	__asm__ __volatile ("movl $0,%0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*alp));
#endif

	return ret;
}

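/*
 * Spin until the lock is acquired.  While spinning, service any pending
 * console-character or DDB IPIs (the VAX_LOCK_CHECKS bits) so that a CPU
 * spinning at a high IPL cannot deadlock against a CPU that is waiting
 * for one of those IPIs to be handled; see the SPINLOCK_SPIN_HOOK
 * comment below.
 */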
#define VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
#define __cpu_simple_lock(alp)					\
do {								\
	struct cpu_info *__ci = curcpu();			\
								\
	while (__cpu_simple_lock_try(alp) == 0) {		\
		int __s;					\
								\
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {	\
			__s = splipi();				\
			cpu_handle_ipi();			\
			splx(__s);				\
		}						\
	}							\
} while (0)

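/*
 * Disabled alternative: __cpu_simple_lock() written as an inline
 * function rather than a macro, with further #if 0'd variants that call
 * the Slock assembler stub or spin on the interlocked bbssi instruction
 * directly.
 */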
#if 0
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm__ __volatile ("movl %0,r1;jsb Slock"
		: /* No output */
		: "g"(alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm__ __volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*alp));
#endif
}
#endif

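/*
 * Release the lock.  The lock address is handed to the Sunlock assembler
 * stub in r1, which leaves the lock word in the unlocked state (the
 * disabled variant clears the lock bit directly with bbcci).
 */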
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	__asm__ __volatile ("movl %0,r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#if 0
	__asm__ __volatile ("bbcci $0, %0, 1f;1:"
		: /* No output */
		: "m"(*alp));
#endif
}

#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower. This can cause some problems while waiting for r/w
 * spinlocks from a high'ish priority level: IPIs that come in will not
 * be processed. This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define SPINLOCK_SPIN_HOOK					\
do {								\
	struct cpu_info *__ci = curcpu();			\
	int __s;						\
								\
	if (__ci->ci_ipimsgs != 0) {				\
		/* printf("CPU %lu has IPIs pending\n",		\
			__ci->ci_cpuid); */			\
		__s = splipi();					\
		cpu_handle_ipi();				\
		splx(__s);					\
	}							\
} while (0)
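/*
 * Rough caller-side sketch (illustrative only; the lock variable is
 * hypothetical, not part of this header): spin-wait loops are expected
 * to invoke the hook each time an acquisition attempt fails, e.g.
 *
 *	while (__cpu_simple_lock_try(&l) == 0)
 *		SPINLOCK_SPIN_HOOK;
 */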
#endif /* MULTIPROCESSOR */
#endif /* _VAX_LOCK_H_ */