/*	$NetBSD: ltsleep.c,v 1.6.10.5 2010/08/11 22:55:06 yamt Exp $	*/
2
3 /*
4 * Copyright (c) 2009, 2010 Antti Kantee. All Rights Reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 /*
29 * Implementation of the ltsleep/mtsleep kernel sleep interface. There
30 * are two sides to our implementation. For historic spinlocks we
31 * assume the kernel is giantlocked and use kernel giantlock as the
32 * wait interlock. For mtsleep, we use the interlock supplied by
33 * the caller. This duality leads to some if/else messiness in the code ...
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: ltsleep.c,v 1.6.10.5 2010/08/11 22:55:06 yamt Exp $");
38
39 #include <sys/param.h>
40 #include <sys/kernel.h>
41 #include <sys/proc.h>
42 #include <sys/queue.h>
43 #include <sys/simplelock.h>
44
45 #include <rump/rumpuser.h>
46
47 #include "rump_private.h"
48
/*
 * One ltsleeper exists per sleeping thread (allocated on the sleeping
 * thread's stack in sleeper()).  It is linked onto the global sleepers
 * list so that wakeup()/wakeup_one() can find it by wait channel.
 * Depending on which interlock type the sleep was initiated with, the
 * thread blocks on either a kernel condvar or a rumpuser (hypercall)
 * condvar -- the union holds whichever one is in use.
 */
struct ltsleeper {
	wchan_t id;			/* wait channel slept on */
	union {
		struct rumpuser_cv *user;	/* biglocked ltsleep() path */
		kcondvar_t kern;		/* kmutex mtsleep() path */
	} u;
	bool iskwait;			/* true: u.kern valid, else u.user */
	LIST_ENTRY(ltsleeper) entries;	/* linkage on the sleepers list */
};
/* shorthand for the union members */
#define ucv u.user
#define kcv u.kern
60
/* All currently sleeping threads.  Both the list and the individual
 * entries are protected by qlock, which is initialized in
 * rump_tsleep_init(). */
static LIST_HEAD(, ltsleeper) sleepers = LIST_HEAD_INITIALIZER(sleepers);
static struct rumpuser_mtx *qlock;
63
64 static int
65 sleeper(wchan_t ident, int timo, kmutex_t *kinterlock)
66 {
67 struct ltsleeper lts;
68 struct timespec ts, ticks;
69 int rv;
70
71 lts.id = ident;
72 if (kinterlock) {
73 lts.iskwait = true;
74 cv_init(<s.kcv, "mtsleep");
75 } else {
76 lts.iskwait = false;
77 rumpuser_cv_init(<s.ucv);
78 }
79
80 rumpuser_mutex_enter_nowrap(qlock);
81 LIST_INSERT_HEAD(&sleepers, <s, entries);
82 rumpuser_mutex_exit(qlock);
83
84 if (timo) {
85 if (kinterlock) {
86 rv = cv_timedwait(<s.kcv, kinterlock, timo);
87 } else {
88 /*
89 * Calculate wakeup-time.
90 * XXX: should assert nanotime() does not block,
91 * i.e. yield the cpu and/or biglock.
92 */
93 ticks.tv_sec = timo / hz;
94 ticks.tv_nsec = (timo % hz) * (1000000000/hz);
95 nanotime(&ts);
96 timespecadd(&ts, &ticks, &ts);
97
98 rv = rumpuser_cv_timedwait(lts.ucv, rump_giantlock,
99 ts.tv_sec, ts.tv_nsec);
100 }
101
102 if (rv != 0)
103 rv = EWOULDBLOCK;
104 } else {
105 if (kinterlock) {
106 cv_wait(<s.kcv, kinterlock);
107 } else {
108 rumpuser_cv_wait(lts.ucv, rump_giantlock);
109 }
110 rv = 0;
111 }
112
113 rumpuser_mutex_enter_nowrap(qlock);
114 LIST_REMOVE(<s, entries);
115 rumpuser_mutex_exit(qlock);
116
117 if (kinterlock)
118 cv_destroy(<s.kcv);
119 else
120 rumpuser_cv_destroy(lts.ucv);
121
122 return rv;
123 }
124
125 int
126 ltsleep(wchan_t ident, pri_t prio, const char *wmesg, int timo,
127 volatile struct simplelock *slock)
128 {
129 int rv, nlocks;
130
131 /*
132 * Since we cannot use slock as the rumpuser interlock,
133 * require that everyone using this prehistoric interface
134 * is biglocked.
135 */
136 KASSERT(rump_kernel_isbiglocked());
137 if (slock)
138 simple_unlock(slock);
139
140 rump_kernel_unlock_allbutone(&nlocks);
141 rv = sleeper(ident, timo, NULL);
142 rump_kernel_ununlock_allbutone(nlocks);
143
144 if (slock && (prio & PNORELOCK) == 0)
145 simple_lock(slock);
146
147 return rv;
148 }
149
150 int
151 mtsleep(wchan_t ident, pri_t prio, const char *wmesg, int timo,
152 kmutex_t *lock)
153 {
154 int rv;
155
156 rv = sleeper(ident, timo, lock);
157 if (prio & PNORELOCK)
158 mutex_exit(lock);
159
160 return rv;
161 }
162
163 static void
164 do_wakeup(wchan_t ident, bool wakeup_all)
165 {
166 struct ltsleeper *ltsp;
167
168 rumpuser_mutex_enter_nowrap(qlock);
169 LIST_FOREACH(ltsp, &sleepers, entries) {
170 if (ltsp->id == ident) {
171 if (wakeup_all) {
172 if (ltsp->iskwait) {
173 cv_broadcast(<sp->kcv);
174 } else {
175 rumpuser_cv_broadcast(ltsp->ucv);
176 }
177 } else {
178 if (ltsp->iskwait) {
179 cv_signal(<sp->kcv);
180 } else {
181 rumpuser_cv_signal(ltsp->ucv);
182 }
183 }
184 }
185 }
186 rumpuser_mutex_exit(qlock);
187 }
188
/*
 * Wake all threads sleeping on ident.
 */
void
wakeup(wchan_t ident)
{

	do_wakeup(ident, true);
}
195
/*
 * Wake one thread sleeping on ident.
 */
void
wakeup_one(wchan_t ident)
{

	do_wakeup(ident, false);
}
202
203 void
204 rump_tsleep_init()
205 {
206
207 rumpuser_mutex_init(&qlock);
208 }
209