/*	$NetBSD: lock.h,v 1.7 2001/06/04 15:37:05 ragge Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

typedef __volatile int __cpu_simple_lock_t;

#define __SIMPLELOCK_LOCKED	1
#define __SIMPLELOCK_UNLOCKED	0

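/*
 * Initialize the lock to the unlocked state.  The lock address is
 * passed in r1 to the out-of-line assembly helper Sunlock (reached
 * via jsb), the same helper used by __cpu_simple_unlock() below.
 */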
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	__asm__ __volatile ("movl %0,r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#if 0
	__asm__ __volatile ("bbcci $0, %0, 1f;1:"
		: /* No output */
		: "m"(*alp));
#endif
}

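/*
 * Try to acquire the lock without spinning.  The lock address is
 * passed in r1 to the out-of-line helper Slocktry, whose status comes
 * back in r0: nonzero if the lock was acquired, zero if it was already
 * held (compare the disabled bbssi variant below).
 */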
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int ret;

	__asm__ __volatile ("movl %1,r1;jsb Slocktry;movl r0,%0"
		: "=&r"(ret)
		: "g"(alp)
		: "r0","r1","cc","memory");
#if 0
	__asm__ __volatile ("movl $0,%0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*alp));
#endif

	return ret;
}

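/*
 * Acquire the lock, spinning until __cpu_simple_lock_try() succeeds.
 * While spinning, a pending console-character IPI (IPI_SEND_CNCHAR) is
 * serviced at splipi so that busy-waiting here cannot leave that IPI
 * unhandled; see the deadlock rationale above SPINLOCK_SPIN_HOOK below.
 */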
#define __cpu_simple_lock(alp)						\
{									\
	struct cpu_info *__ci = curcpu();				\
									\
	while (__cpu_simple_lock_try(alp) == 0) {			\
		int __s;						\
									\
		if (__ci->ci_ipimsgs & (1 << IPI_SEND_CNCHAR)) {	\
			__s = splipi();					\
			cpu_handle_ipi();				\
			splx(__s);					\
		}							\
	}								\
}

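/*
 * Disabled alternative: __cpu_simple_lock() written as an inline
 * function rather than a macro, with two further #if 0 assembly
 * variants kept inside for reference.
 */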
#if 0
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm__ __volatile ("movl %0,r1;jsb Slock"
		: /* No output */
		: "g"(alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm__ __volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*alp));
#endif
}
#endif

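/*
 * Release the lock.  Identical to __cpu_simple_lock_init(): the lock
 * address is passed in r1 to the out-of-line helper Sunlock via jsb.
 */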
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	__asm__ __volatile ("movl %0,r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#if 0
	__asm__ __volatile ("bbcci $0, %0, 1f;1:"
		: /* No output */
		: "m"(*alp));
#endif
}

#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts come in at device interrupt
 * priority level or lower.  While a CPU spins for an r/w spinlock at a
 * fairly high priority level, incoming IPIs are therefore not
 * processed, which can lead to deadlock.
 *
 * This hook lets pending IPIs be processed while the spinlock's
 * interlock is released; an illustrative spin loop is sketched after
 * the macro.
 */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		__s = splipi();						\
		cpu_handle_ipi();					\
		splx(__s);						\
	}								\
} while (0)
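
/*
 * Illustrative sketch (disabled, not part of the original interface):
 * a machine-independent spin-wait loop is expected to invoke
 * SPINLOCK_SPIN_HOOK on each iteration so pending IPIs get serviced
 * while busy-waiting.  The function name below is hypothetical and the
 * exact shape of the MI loop is an assumption.
 */
#if 0
static __inline void
__example_spinlock_spin(__cpu_simple_lock_t *alp)
{
	while (__cpu_simple_lock_try(alp) == 0) {
		/* Give pending interprocessor interrupts a chance to run. */
		SPINLOCK_SPIN_HOOK;
	}
}
#endif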
#endif /* MULTIPROCESSOR */
#endif /* _VAX_LOCK_H_ */