/*	$NetBSD: spinlock.h,v 1.7.18.1 2019/06/10 22:08:32 christos Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
31
32 #ifndef _LINUX_SPINLOCK_H_
33 #define _LINUX_SPINLOCK_H_
34
35 #include <sys/cdefs.h>
36 #include <sys/mutex.h>
37
38 #include <machine/limits.h>
39 #include <linux/irqflags.h>
40 #include <linux/preempt.h>
41
42 #define __acquires(lock) /* XXX lockdep stuff */
43 #define __releases(lock) /* XXX lockdep stuff */
44
/*
 * Linux-compatible spin lock, implemented as a thin wrapper around a
 * NetBSD kmutex (initialized as a spin mutex in spin_lock_init below).
 */
typedef struct spinlock {
	kmutex_t sl_lock;	/* underlying kmutex */
} spinlock_t;
48
49 static inline int
50 spin_is_locked(spinlock_t *spinlock)
51 {
52 return mutex_owned(&spinlock->sl_lock);
53 }
54
55 static inline void
56 spin_lock(spinlock_t *spinlock)
57 {
58 mutex_enter(&spinlock->sl_lock);
59 }
60
61 static inline void
62 spin_unlock(spinlock_t *spinlock)
63 {
64 mutex_exit(&spinlock->sl_lock);
65 }
66
67 static inline void
68 spin_lock_irq(spinlock_t *spinlock)
69 {
70 spin_lock(spinlock);
71 }
72
73 static inline void
74 spin_unlock_irq(spinlock_t *spinlock)
75 {
76 spin_unlock(spinlock);
77 }
78
/*
 * spin_lock_irqsave(SPINLOCK, FLAGS)
 *
 *	Acquire SPINLOCK and save the interrupt state in FLAGS.  The
 *	kmutex manages interrupt state via its IPL, so FLAGS is always
 *	set to zero and exists only for Linux API compatibility; pass it
 *	unchanged to spin_unlock_irqrestore.
 *
 *	Must be a macro because the second argument is to be assigned.
 *	NOTE(review): the (spinlock_t *) cast defeats type checking of
 *	the first argument -- presumably deliberate; confirm before
 *	removing.
 */
#define	spin_lock_irqsave(SPINLOCK, FLAGS)				\
do {									\
	(FLAGS) = 0;							\
	mutex_enter(&((spinlock_t *)(SPINLOCK))->sl_lock);		\
} while (0)
85
86 static inline void
87 spin_unlock_irqrestore(spinlock_t *spinlock, unsigned long __unused flags)
88 {
89 mutex_exit(&spinlock->sl_lock);
90 }
91
92 static inline void
93 spin_lock_init(spinlock_t *spinlock)
94 {
95 /* XXX What's the right IPL? IPL_DRM...? */
96 mutex_init(&spinlock->sl_lock, MUTEX_DEFAULT, IPL_VM);
97 }
98
/*
 * XXX Linux doesn't ever destroy spin locks, it seems.  We'll have to
 * kludge it up.
 */
103
104 static inline void
105 spin_lock_destroy(spinlock_t *spinlock)
106 {
107 mutex_destroy(&spinlock->sl_lock);
108 }
109
/*
 * assert_spin_locked(spinlock)
 *
 *	Panic (under DIAGNOSTIC) unless the spin lock is held.
 *	This is a macro to make the panic message clearer.
 */
#define	assert_spin_locked(spinlock)					\
	KASSERT(mutex_owned(&(spinlock)->sl_lock))
113
/*
 * Stupid reader/writer spin locks.  No attempt to avoid writer
 * starvation.  Must allow recursive readers.  We use mutex and state
 * instead of compare-and-swap for expedience and LOCKDEBUG support.
 */
119
/*
 * Linux-compatible reader/writer spin lock: a kmutex guarding a reader
 * count.  Writers hold rw_lock outright; readers take rw_lock only long
 * enough to adjust rw_nreaders.
 */
typedef struct linux_rwlock {
	kmutex_t rw_lock;	/* serializes writers and rw_nreaders updates */
	unsigned rw_nreaders;	/* number of active readers */
} rwlock_t;
124
125 static inline void
126 rwlock_init(rwlock_t *rw)
127 {
128
129 mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
130 rw->rw_nreaders = 0;
131 }
132
133 static inline void
134 rwlock_destroy(rwlock_t *rw)
135 {
136
137 KASSERTMSG(rw->rw_nreaders == 0,
138 "rwlock still held by %u readers", rw->rw_nreaders);
139 mutex_destroy(&rw->rw_lock);
140 }
141
142 static inline void
143 write_lock_irq(rwlock_t *rw)
144 {
145
146 for (;;) {
147 mutex_spin_enter(&rw->rw_lock);
148 if (rw->rw_nreaders == 0)
149 break;
150 mutex_spin_exit(&rw->rw_lock);
151 }
152 }
153
154 static inline void
155 write_unlock_irq(rwlock_t *rw)
156 {
157
158 KASSERT(rw->rw_nreaders == 0);
159 mutex_spin_exit(&rw->rw_lock);
160 }
161
162 static inline void
163 read_lock(rwlock_t *rw)
164 {
165
166 mutex_spin_enter(&rw->rw_lock);
167 KASSERT(rw->rw_nreaders < UINT_MAX);
168 rw->rw_nreaders++;
169 mutex_spin_exit(&rw->rw_lock);
170 }
171
172 static inline void
173 read_unlock(rwlock_t *rw)
174 {
175
176 mutex_spin_enter(&rw->rw_lock);
177 KASSERT(0 < rw->rw_nreaders);
178 rw->rw_nreaders--;
179 mutex_spin_exit(&rw->rw_lock);
180 }
181
182 #endif /* _LINUX_SPINLOCK_H_ */
183