/*	$NetBSD: lock.h,v 1.4 2003/10/26 11:15:16 yamt Exp $	*/
2 1.1 fvdl
3 1.1 fvdl /*-
4 1.1 fvdl * Copyright (c) 2000 The NetBSD Foundation, Inc.
5 1.1 fvdl * All rights reserved.
6 1.1 fvdl *
7 1.1 fvdl * This code is derived from software contributed to The NetBSD Foundation
8 1.1 fvdl * by Jason R. Thorpe.
9 1.1 fvdl *
10 1.1 fvdl * Redistribution and use in source and binary forms, with or without
11 1.1 fvdl * modification, are permitted provided that the following conditions
12 1.1 fvdl * are met:
13 1.1 fvdl * 1. Redistributions of source code must retain the above copyright
14 1.1 fvdl * notice, this list of conditions and the following disclaimer.
15 1.1 fvdl * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 fvdl * notice, this list of conditions and the following disclaimer in the
17 1.1 fvdl * documentation and/or other materials provided with the distribution.
18 1.1 fvdl * 3. All advertising materials mentioning features or use of this software
19 1.1 fvdl * must display the following acknowledgement:
20 1.1 fvdl * This product includes software developed by the NetBSD
21 1.1 fvdl * Foundation, Inc. and its contributors.
22 1.1 fvdl * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.1 fvdl * contributors may be used to endorse or promote products derived
24 1.1 fvdl * from this software without specific prior written permission.
25 1.1 fvdl *
26 1.1 fvdl * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.1 fvdl * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.1 fvdl * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.1 fvdl * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.1 fvdl * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.1 fvdl * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.1 fvdl * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.1 fvdl * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.1 fvdl * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.1 fvdl * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.1 fvdl * POSSIBILITY OF SUCH DAMAGE.
37 1.1 fvdl */
38 1.1 fvdl
39 1.1 fvdl /*
40 1.1 fvdl * Machine-dependent spin lock operations.
41 1.1 fvdl */
42 1.1 fvdl
43 1.1 fvdl #ifndef _I386_LOCK_H_
44 1.1 fvdl #define _I386_LOCK_H_
45 1.1 fvdl
46 1.1 fvdl #if defined(_KERNEL_OPT)
47 1.1 fvdl #include "opt_lockdebug.h"
48 1.1 fvdl #endif
49 1.1 fvdl
50 1.2 fvdl #include <machine/cpufunc.h>
51 1.1 fvdl
52 1.1 fvdl /*
53 1.1 fvdl * compiler barrier: prevent reordering of instructions.
54 1.1 fvdl * XXX something similar will move to <sys/cdefs.h>
55 1.1 fvdl * or thereabouts.
56 1.1 fvdl * This prevents the compiler from reordering code around
57 1.1 fvdl * this "instruction", acting as a sequence point for code generation.
58 1.1 fvdl */
59 1.1 fvdl
60 1.1 fvdl #define __lockbarrier() __asm __volatile("": : :"memory")
61 1.1 fvdl
62 1.1 fvdl #ifdef LOCKDEBUG
63 1.1 fvdl
64 1.1 fvdl extern void __cpu_simple_lock_init __P((__cpu_simple_lock_t *));
65 1.1 fvdl extern void __cpu_simple_lock __P((__cpu_simple_lock_t *));
66 1.1 fvdl extern int __cpu_simple_lock_try __P((__cpu_simple_lock_t *));
67 1.1 fvdl extern void __cpu_simple_unlock __P((__cpu_simple_lock_t *));
68 1.1 fvdl
69 1.1 fvdl #else
70 1.1 fvdl
71 1.1 fvdl #include <machine/atomic.h>
72 1.1 fvdl
73 1.1 fvdl static __inline void __cpu_simple_lock_init __P((__cpu_simple_lock_t *))
74 1.1 fvdl __attribute__((__unused__));
75 1.1 fvdl static __inline void __cpu_simple_lock __P((__cpu_simple_lock_t *))
76 1.1 fvdl __attribute__((__unused__));
77 1.1 fvdl static __inline int __cpu_simple_lock_try __P((__cpu_simple_lock_t *))
78 1.1 fvdl __attribute__((__unused__));
79 1.1 fvdl static __inline void __cpu_simple_unlock __P((__cpu_simple_lock_t *))
80 1.1 fvdl __attribute__((__unused__));
81 1.1 fvdl
82 1.1 fvdl static __inline void
83 1.1 fvdl __cpu_simple_lock_init(__cpu_simple_lock_t *lockp)
84 1.1 fvdl {
85 1.1 fvdl
86 1.1 fvdl *lockp = __SIMPLELOCK_UNLOCKED;
87 1.1 fvdl __lockbarrier();
88 1.1 fvdl }
89 1.1 fvdl
90 1.1 fvdl static __inline void
91 1.1 fvdl __cpu_simple_lock(__cpu_simple_lock_t *lockp)
92 1.1 fvdl {
93 1.1 fvdl
94 1.1 fvdl while (x86_atomic_testset_i(lockp, __SIMPLELOCK_LOCKED)
95 1.2 fvdl != __SIMPLELOCK_UNLOCKED)
96 1.2 fvdl x86_pause();
97 1.1 fvdl __lockbarrier();
98 1.1 fvdl }
99 1.1 fvdl
100 1.1 fvdl static __inline int
101 1.1 fvdl __cpu_simple_lock_try(__cpu_simple_lock_t *lockp)
102 1.1 fvdl {
103 1.1 fvdl int r = (x86_atomic_testset_i(lockp, __SIMPLELOCK_LOCKED)
104 1.1 fvdl == __SIMPLELOCK_UNLOCKED);
105 1.1 fvdl
106 1.1 fvdl __lockbarrier();
107 1.1 fvdl
108 1.1 fvdl return (r);
109 1.1 fvdl }
110 1.1 fvdl
111 1.1 fvdl static __inline void
112 1.1 fvdl __cpu_simple_unlock(__cpu_simple_lock_t *lockp)
113 1.1 fvdl {
114 1.1 fvdl
115 1.1 fvdl __lockbarrier();
116 1.1 fvdl *lockp = __SIMPLELOCK_UNLOCKED;
117 1.1 fvdl }
118 1.1 fvdl
119 1.1 fvdl #endif /* !LOCKDEBUG */
120 1.4 yamt
121 1.4 yamt #ifdef _KERNEL
122 1.4 yamt #define SPINLOCK_SPIN_HOOK x86_pause()
123 1.4 yamt #endif
124 1.1 fvdl
125 1.1 fvdl #endif /* _I386_LOCK_H_ */
126