/*	$NetBSD: lock.h,v 1.5.4.1 2001/06/21 19:38:16 nathanw Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

typedef __volatile int __cpu_simple_lock_t;

#define __SIMPLELOCK_LOCKED	1
#define __SIMPLELOCK_UNLOCKED	0

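/*
 * The lock operations below pass the lock address to small assembler
 * helpers (Slocktry, Sunlock and, in the disabled variant, Slock) in r1
 * via jsb; the #if 0 alternatives kept next to them are equivalent
 * inline versions built on the interlocked bit instructions bbssi/bbcci.
 */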
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	__asm__ __volatile ("movl %0,r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#if 0
	__asm__ __volatile ("bbcci $0, %0, 1f;1:"
		: /* No output */
		: "m"(*alp));
#endif
}
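
/*
 * Grab the lock if it is free: returns nonzero on success and 0 if the
 * lock was already held (the __cpu_simple_lock() loop below retries
 * while this returns 0).
 */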
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int ret;

	__asm__ __volatile ("movl %1,r1;jsb Slocktry;movl r0,%0"
		: "=&r"(ret)
		: "g"(alp)
		: "r0","r1","cc","memory");
#if 0
	__asm__ __volatile ("movl $0,%0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*alp));
#endif

	return ret;
}

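/*
 * __cpu_simple_lock() spins on __cpu_simple_lock_try(); while spinning
 * it drains console-character and DDB IPIs (VAX_LOCK_CHECKS) so that
 * IPIs are not left unprocessed, which could otherwise deadlock (see
 * the MULTIPROCESSOR comment below).
 */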
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
#define	__cpu_simple_lock(alp)					\
{								\
	struct cpu_info *__ci = curcpu();			\
								\
	while (__cpu_simple_lock_try(alp) == 0) {		\
		int __s;					\
								\
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {	\
			__s = splipi();				\
			cpu_handle_ipi();			\
			splx(__s);				\
		}						\
	}							\
}

#if 0
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm__ __volatile ("movl %0,r1;jsb Slock"
		: /* No output */
		: "g"(alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm__ __volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*alp));
#endif
}
#endif

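/*
 * Release the lock; the Sunlock helper clears the lock bit, as the
 * bbcci-based alternative under #if 0 does inline.
 */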
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	__asm__ __volatile ("movl %0,r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#if 0
	__asm__ __volatile ("bbcci $0, %0, 1f;1:"
		: /* No output */
		: "m"(*alp));
#endif
}

#if defined(MULTIPROCESSOR)
/*
 * On the Vax, interprocessor interrupts can come in at device priority
 * level or lower. This can cause some problems while waiting for r/w
 * spinlocks from a high'ish priority level: IPIs that come in will not
 * be processed. This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define SPINLOCK_SPIN_HOOK					\
do {								\
	struct cpu_info *__ci = curcpu();			\
	int __s;						\
								\
	if (__ci->ci_ipimsgs != 0) {				\
		/* printf("CPU %lu has IPIs pending\n",		\
			__ci->ci_cpuid); */			\
		__s = splipi();					\
		cpu_handle_ipi();				\
		splx(__s);					\
	}							\
} while (0)
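
/*
 * Illustrative sketch only (not part of this header): a caller that
 * spins on a hypothetical lock would invoke SPINLOCK_SPIN_HOOK on each
 * failed attempt so pending IPIs get serviced while it waits, e.g.:
 */
#if 0
	while (__cpu_simple_lock_try(&some_lock) == 0)	/* hypothetical lock */
		SPINLOCK_SPIN_HOOK;
#endif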
#endif /* MULTIPROCESSOR */
#endif /* _VAX_LOCK_H_ */