/*	$NetBSD: lock.h,v 1.23 2008/01/09 00:23:18 yamt Exp $	*/

/*-
 * Copyright (c) 2000, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _X86_LOCK_H_
#define	_X86_LOCK_H_

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{

	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{

	*__ptr = __SIMPLELOCK_UNLOCKED;
}
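
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * the _P predicates and the set/clear helpers above touch the lock word
 * with plain loads and stores, with no atomic operations or barriers, so
 * they are only safe where the caller already holds the lock or where no
 * other CPU can observe the lock word.
 *
 *	__cpu_simple_lock_t l = __SIMPLELOCK_UNLOCKED;
 *
 *	__cpu_simple_lock_set(&l);
 *	KASSERT(__SIMPLELOCK_LOCKED_P(&l));
 *	__cpu_simple_lock_clear(&l);
 *	KASSERT(__SIMPLELOCK_UNLOCKED_P(&l));
 */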

#ifdef _KERNEL

#include <machine/cpufunc.h>

void	__cpu_simple_lock_init(__cpu_simple_lock_t *);
void	__cpu_simple_lock(__cpu_simple_lock_t *);
int	__cpu_simple_lock_try(__cpu_simple_lock_t *);
void	__cpu_simple_unlock(__cpu_simple_lock_t *);

#define	SPINLOCK_SPIN_HOOK	/* nothing */

#ifdef SPINLOCK_BACKOFF_HOOK
#undef SPINLOCK_BACKOFF_HOOK
#endif
#define	SPINLOCK_BACKOFF_HOOK	x86_pause()
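
/*
 * Sketch of how a caller might use the backoff hook while spinning
 * (cf. SPINLOCK_BACKOFF() in <sys/lock.h>; "count" and the MIN/MAX
 * bounds below are illustrative assumptions, not defined here).
 * x86_pause() issues the PAUSE instruction, which hints to the CPU
 * that this is a spin-wait loop, reducing bus traffic and power use:
 *
 *	int count = SPINLOCK_BACKOFF_MIN;	// assumed initial backoff
 *
 *	while (!__cpu_simple_lock_try(lockp)) {
 *		int i;
 *		for (i = count; i != 0; i--)
 *			SPINLOCK_BACKOFF_HOOK;	// x86_pause()
 *		if (count < SPINLOCK_BACKOFF_MAX)
 *			count += count;		// exponential backoff
 *	}
 */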

#else

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__unused;
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__unused;
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__unused;
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__unused;

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *lockp)
{

	*lockp = __SIMPLELOCK_UNLOCKED;
	__insn_barrier();
}

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lockp)
{
	uint8_t val;

	/*
	 * xchgb atomically swaps the register with the lock byte; with
	 * a memory operand, xchg carries an implicit LOCK prefix.  The
	 * old value is read back: if it was __SIMPLELOCK_UNLOCKED, we
	 * now own the lock.
	 */
	val = __SIMPLELOCK_LOCKED;
	__asm volatile ("xchgb %0,(%2)" : 
	    "=r" (val)
	    :"0" (val), "r" (lockp));
	__insn_barrier();
	return val == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *lockp)
{

	/* Spin until the atomic exchange obtains the lock. */
	while (!__cpu_simple_lock_try(lockp))
		/* nothing */;
	__insn_barrier();
}

/*
 * Note on x86 memory ordering
 *
 * When releasing a lock we must ensure that no stores or loads from within
 * the critical section are re-ordered by the CPU to occur outside of it:
 * they must have completed and be visible to other processors once the lock
 * has been released.
 *
 * NetBSD usually runs with the kernel mapped (via MTRR) in a WB (write
 * back) memory region.  In that case, memory ordering on x86 platforms
 * looks like this:
 *
 * i386		All loads/stores occur in instruction sequence.
 *
 * i486		All loads/stores occur in instruction sequence.  In
 * Pentium	exceptional circumstances, loads can be re-ordered around
 *		stores, but for the purposes of releasing a lock it does
 *		not matter.  Stores may not be immediately visible to other
 *		processors as they can be buffered.  However, since the
 *		stores are buffered in order the lock release will always be
 *		the last operation in the critical section that becomes
 *		visible to other CPUs.
 *
 * Pentium Pro	The "Intel 64 and IA-32 Architectures Software Developer's
 * onwards	Manual" volume 3A (order number 248966) says that (1) "Reads
 *		can be carried out speculatively and in any order" and (2)
 *		"Reads can pass buffered stores, but the processor is
 *		self-consistent.".  This would be a problem for the below,
 *		and would mandate a locked instruction cycle or load fence
 *		before releasing the simple lock.
 *
 *		The "Intel Pentium 4 Processor Optimization" guide (order
 *		number 253668-022US) says: "Loads can be moved before stores
 *		that occurred earlier in the program if they are not
 *		predicted to load from the same linear address.".  This is
 *		not a problem since the only loads that can be re-ordered
 *		take place once the lock has been released via a store.
 *
 *		The above two documents seem to contradict each other,
 *		however with the exception of early steppings of the Pentium
 *		Pro, the second document is closer to the truth: a store
 *		will always act as a load fence for all loads that precede
 *		the store in instruction order.
 *
 *		Again, note that stores can be buffered and will not always
 *		become immediately visible to other CPUs: they are however
 *		buffered in order.
 *
 * AMD64	Stores occur in order and are buffered.  Loads can be
 *		reordered, however stores act as load fences, meaning that
 *		loads can not be reordered around stores.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *lockp)
{

	__insn_barrier();
	*lockp = __SIMPLELOCK_UNLOCKED;
}
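
/*
 * Illustrative usage (a sketch only; real callers live elsewhere).
 * Given the ordering rules above, the plain store in
 * __cpu_simple_unlock() suffices to release the lock: the compiler
 * barrier keeps the compiler from sinking critical-section accesses
 * past the store, and the CPU buffers stores in order, so:
 *
 *	static __cpu_simple_lock_t lock = __SIMPLELOCK_UNLOCKED;
 *
 *	__cpu_simple_lock(&lock);
 *	// critical section: all loads/stores here become visible to
 *	// other CPUs no later than the releasing store below
 *	__cpu_simple_unlock(&lock);
 */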

#endif	/* _KERNEL */

#endif	/* _X86_LOCK_H_ */