/*	$NetBSD: spinlock.h,v 1.13 2021/12/19 11:47:55 riastradh Exp $	*/
2
3 /*-
4 * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifndef _LINUX_SPINLOCK_H_
33 #define _LINUX_SPINLOCK_H_
34
35 #include <sys/cdefs.h>
36 #include <sys/mutex.h>
37
38 #include <machine/limits.h>
39
40 #include <linux/irqflags.h>
41 #include <linux/lockdep.h>
42 #include <linux/preempt.h>
43
/*
 * Linux spinlock_t, backed by a NetBSD kmutex.  Initialized as a spin
 * mutex at IPL_VM by spin_lock_init, so acquiring it also blocks
 * interrupts at or below that IPL (see spin_lock_init below).
 */
typedef struct spinlock {
	kmutex_t sl_lock;
} spinlock_t;
47
/*
 * spin_is_locked(spinlock)
 *
 *	True if the spin lock is held.
 *
 *	NOTE(review): mutex_owned(9) reports ownership relative to the
 *	current LWP/CPU, not whether *any* CPU holds the lock, which is
 *	what Linux's spin_is_locked nominally answers — presumably all
 *	callers only use this in assertions about locks they themselves
 *	hold; confirm against callers.
 */
static inline int
spin_is_locked(spinlock_t *spinlock)
{
	return mutex_owned(&spinlock->sl_lock);
}
53
/*
 * spin_lock(spinlock)
 *
 *	Acquire the spin lock.  Interrupt blocking is handled by the
 *	kmutex's IPL (set at initialization), not done explicitly here.
 */
static inline void
spin_lock(spinlock_t *spinlock)
{
	mutex_enter(&spinlock->sl_lock);
}
59
/*
 * spin_unlock(spinlock)
 *
 *	Release the spin lock, restoring the IPL the kmutex saved on
 *	entry.
 */
static inline void
spin_unlock(spinlock_t *spinlock)
{
	mutex_exit(&spinlock->sl_lock);
}
65
/*
 * spin_lock_irq(spinlock)
 *
 *	Acquire the spin lock with interrupts blocked.  No explicit
 *	interrupt manipulation is needed: the underlying kmutex's IPL
 *	already blocks the relevant interrupts while the lock is held,
 *	so this is identical to spin_lock.
 */
static inline void
spin_lock_irq(spinlock_t *spinlock)
{
	spin_lock(spinlock);
}
71
/*
 * spin_unlock_irq(spinlock)
 *
 *	Release the spin lock and re-enable interrupts.  Identical to
 *	spin_unlock: the kmutex restores the saved IPL on exit.
 */
static inline void
spin_unlock_irq(spinlock_t *spinlock)
{
	spin_unlock(spinlock);
}
77
/*
 * spin_lock_irqsave(SPINLOCK, FLAGS)
 *
 *	Acquire SPINLOCK and save the prior interrupt state in FLAGS.
 *	The saved state is always 0 here: interrupt blocking is handled
 *	by the kmutex's IPL, so FLAGS carries no real information and is
 *	assigned only to satisfy the Linux API contract (and to keep
 *	`unused variable' warnings away in callers).
 *
 *	Must be a macro because the second argument is to be assigned.
 */
#define spin_lock_irqsave(SPINLOCK, FLAGS) \
do { \
	(FLAGS) = 0; \
	mutex_enter(&((spinlock_t *)(SPINLOCK))->sl_lock); \
} while (0)
84
/*
 * spin_trylock_irqsave(SPINLOCK, FLAGS)
 *
 *	Try to acquire SPINLOCK without waiting; evaluates to nonzero
 *	on success, zero if the lock was already held.  FLAGS is set to
 *	0 as in spin_lock_irqsave — it carries no interrupt state here.
 *	Macro for the same reason as spin_lock_irqsave: FLAGS must be
 *	assignable.
 */
#define spin_trylock_irqsave(SPINLOCK, FLAGS) \
( (FLAGS) = 0, \
    mutex_tryenter(&((spinlock_t *)(SPINLOCK))->sl_lock) )
88
/*
 * spin_unlock_irqrestore(spinlock, flags)
 *
 *	Release the spin lock and restore the interrupt state saved by
 *	spin_lock_irqsave.  flags is ignored (hence __unused): the
 *	kmutex restores the saved IPL itself on exit.
 */
static inline void
spin_unlock_irqrestore(spinlock_t *spinlock, unsigned long __unused flags)
{
	mutex_exit(&spinlock->sl_lock);
}
94
95 static inline void
96 spin_lock_nested(spinlock_t *spinlock, int subclass)
97 {
98 spin_lock(spinlock);
99 }
100
/*
 * spin_lock_irqsave_nested(SPINLOCK, FLAGS, SUBCLASS)
 *
 *	spin_lock_irqsave with a lockdep nesting subclass; SUBCLASS is
 *	ignored here just as in spin_lock_nested.
 */
#define spin_lock_irqsave_nested(SPINLOCK, FLAGS, SUBCLASS) \
	spin_lock_irqsave(SPINLOCK, FLAGS)
103
/*
 * spin_lock_init(spinlock)
 *
 *	Initialize the spin lock.  MUTEX_DEFAULT with a nonzero IPL
 *	(IPL_VM) yields a spin mutex per mutex(9), so mutex_enter on it
 *	spins rather than sleeps, matching Linux spinlock semantics.
 */
static inline void
spin_lock_init(spinlock_t *spinlock)
{
	/* XXX What's the right IPL?  IPL_DRM...? */
	mutex_init(&spinlock->sl_lock, MUTEX_DEFAULT, IPL_VM);
}
110
111 /*
112 * XXX Linux doesn't ever destroy spin locks, it seems. We'll have to
113 * kludge it up.
114 */
115
/*
 * spin_lock_destroy(spinlock)
 *
 *	NetBSD-only extension (see comment above): release the
 *	resources of a spin lock that will no longer be used.  Callers
 *	must invoke this where Linux code would simply drop the lock on
 *	the floor.
 */
static inline void
spin_lock_destroy(spinlock_t *spinlock)
{
	mutex_destroy(&spinlock->sl_lock);
}
121
/*
 * assert_spin_locked(spinlock)
 *
 *	Assert that the caller holds the spin lock.  This is a macro
 *	(rather than an inline calling KASSERT) so the panic message
 *	shows the caller's lock expression, making it clearer.
 */
#define assert_spin_locked(spinlock) \
	KASSERT(mutex_owned(&(spinlock)->sl_lock))
125
126 /*
127 * Stupid reader/writer spin locks. No attempt to avoid writer
128 * starvation. Must allow recursive readers. We use mutex and state
129 * instead of compare-and-swap for expedience and LOCKDEBUG support.
130 */
131
/*
 * Linux rwlock_t: a reader/writer spin lock built from a spin kmutex
 * (rw_lock) plus a count of active readers (rw_nreaders).  The mutex
 * protects rw_nreaders and is held for the full duration of a write
 * lock; readers hold it only transiently to adjust the count.
 */
typedef struct linux_rwlock {
	kmutex_t rw_lock;
	unsigned rw_nreaders;
} rwlock_t;
136
/*
 * rwlock_init(rw)
 *
 *	Initialize the rwlock: a spin mutex at IPL_VM (matching
 *	spin_lock_init) and zero active readers.
 */
static inline void
rwlock_init(rwlock_t *rw)
{

	mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
	rw->rw_nreaders = 0;
}
144
/*
 * rwlock_destroy(rw)
 *
 *	Release the resources of an rwlock that will no longer be used.
 *	It is a bug to destroy an rwlock while any reader still holds
 *	it, hence the assertion on rw_nreaders.
 */
static inline void
rwlock_destroy(rwlock_t *rw)
{

	KASSERTMSG(rw->rw_nreaders == 0,
	    "rwlock still held by %u readers", rw->rw_nreaders);
	mutex_destroy(&rw->rw_lock);
}
153
154 static inline void
155 write_lock_irq(rwlock_t *rw)
156 {
157
158 for (;;) {
159 mutex_spin_enter(&rw->rw_lock);
160 if (rw->rw_nreaders == 0)
161 break;
162 mutex_spin_exit(&rw->rw_lock);
163 }
164 }
165
/*
 * write_unlock_irq(rw)
 *
 *	Release rw, held for writing.  The writer held the inner mutex
 *	the whole time (see write_lock_irq), so no reader can have
 *	slipped in -- assert that before dropping the mutex.
 */
static inline void
write_unlock_irq(rwlock_t *rw)
{

	KASSERT(rw->rw_nreaders == 0);
	mutex_spin_exit(&rw->rw_lock);
}
173
/*
 * read_lock(rw)
 *
 *	Acquire rw for reading.  The inner mutex is held only long
 *	enough to bump the reader count; a positive count is what
 *	excludes writers (write_lock_irq spins until it reaches zero).
 *	This also makes recursive readers work, as required (see the
 *	block comment above).
 */
static inline void
read_lock(rwlock_t *rw)
{

	mutex_spin_enter(&rw->rw_lock);
	KASSERT(rw->rw_nreaders < UINT_MAX);
	rw->rw_nreaders++;
	mutex_spin_exit(&rw->rw_lock);
}
183
/*
 * read_unlock(rw)
 *
 *	Release rw, held for reading: decrement the reader count under
 *	the inner mutex.  Asserts the count is nonzero, i.e. that the
 *	lock really was read-held.
 */
static inline void
read_unlock(rwlock_t *rw)
{

	mutex_spin_enter(&rw->rw_lock);
	KASSERT(0 < rw->rw_nreaders);
	rw->rw_nreaders--;
	mutex_spin_exit(&rw->rw_lock);
}
193
/*
 * local_bh_disable()
 *
 *	Linux: disable bottom-half (softirq) processing on this CPU.
 *	Deliberately a no-op here -- presumably the spin mutexes' IPL
 *	already provides the exclusion the callers need; confirm
 *	against callers if softirq-like deferral is ever added.
 */
static inline void
local_bh_disable(void)
{
}
198
/*
 * local_bh_enable()
 *
 *	Linux: re-enable bottom-half (softirq) processing on this CPU.
 *	No-op, pairing with the no-op local_bh_disable.
 */
static inline void
local_bh_enable(void)
{
}
203
204 #endif /* _LINUX_SPINLOCK_H_ */
205