/* $NetBSD: ltsleep.c,v 1.28 2010/12/01 14:59:38 pooka Exp $ */

/*
 * Copyright (c) 2009, 2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of the ltsleep/mtsleep kernel sleep interface.  There
 * are two sides to our implementation.  For historic spinlocks we
 * assume the kernel is giantlocked and use the kernel giantlock as the
 * wait interlock.  For mtsleep, we use the interlock supplied by
 * the caller.  This duality leads to some if/else messiness in the code ...
 */
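
/*
 * Illustrative usage sketch (not part of this file; the names sc_lock,
 * sc_ready and "scwait" are made up for the example).  A caller of the
 * mtsleep() path below might block on a condition with a timeout
 * roughly like this:
 *
 *	mutex_enter(&sc_lock);
 *	while (!sc_ready) {
 *		error = mtsleep(&sc_ready, PRIBIO, "scwait", hz, &sc_lock);
 *		if (error == EWOULDBLOCK)
 *			break;		(timed out after roughly one second)
 *	}
 *	mutex_exit(&sc_lock);
 *
 * The producer side, after setting sc_ready under sc_lock, calls
 * wakeup(&sc_ready).  The legacy ltsleep() path is analogous, except
 * that it takes a struct simplelock and relies on the biglock.
 */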

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ltsleep.c,v 1.28 2010/12/01 14:59:38 pooka Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/simplelock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

struct ltsleeper {
        wchan_t id;
        union {
                struct rumpuser_cv *user;
                kcondvar_t kern;
        } u;
        bool iskwait;
        LIST_ENTRY(ltsleeper) entries;
};
#define ucv u.user
#define kcv u.kern

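/*
 * Every thread currently blocked in sleeper() below is linked onto the
 * sleepers list so that wakeup()/wakeup_one() can find it by its wait
 * channel.  The list is protected by qlock, which is initialized in
 * rump_tsleep_init().
 */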
static LIST_HEAD(, ltsleeper) sleepers = LIST_HEAD_INITIALIZER(sleepers);
static struct rumpuser_mtx *qlock;

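/*
 * Common sleep path for ltsleep() and mtsleep().  If kinterlock is
 * non-NULL, the caller-supplied kmutex is used as the interlock and a
 * kernel condvar does the waiting; otherwise the wait happens on a
 * rumpuser condvar with the biglock (rump_giantlock) as the interlock.
 * Returns 0 on wakeup, or EWOULDBLOCK if the optional timeout expired.
 */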
static int
sleeper(wchan_t ident, int timo, kmutex_t *kinterlock)
{
        struct ltsleeper lts;
        struct timespec ts, ticks;
        int rv;

        lts.id = ident;
        if (kinterlock) {
                lts.iskwait = true;
                cv_init(&lts.kcv, "mtsleep");
        } else {
                lts.iskwait = false;
                rumpuser_cv_init(&lts.ucv);
        }

        rumpuser_mutex_enter_nowrap(qlock);
        LIST_INSERT_HEAD(&sleepers, &lts, entries);
        rumpuser_mutex_exit(qlock);

        if (timo) {
                if (kinterlock) {
                        rv = cv_timedwait(&lts.kcv, kinterlock, timo);
                } else {
                        /*
                         * Calculate wakeup-time.
                         * XXX: should assert nanotime() does not block,
                         * i.e. yield the cpu and/or biglock.
                         */
                        ticks.tv_sec = timo / hz;
                        ticks.tv_nsec = (timo % hz) * (1000000000/hz);
                        nanotime(&ts);
                        timespecadd(&ts, &ticks, &ts);

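                        /*
                         * Worked example (illustrative): with hz = 100
                         * and timo = 150, the conversion above yields
                         * ticks = { 1 s, 500000000 ns }, i.e. a 1.5
                         * second relative timeout added to the current
                         * time to form the absolute wakeup time handed
                         * to rumpuser_cv_timedwait().
                         */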
                        rv = rumpuser_cv_timedwait(lts.ucv, rump_giantlock,
                            ts.tv_sec, ts.tv_nsec);
                }

                if (rv != 0)
                        rv = EWOULDBLOCK;
        } else {
                if (kinterlock) {
                        cv_wait(&lts.kcv, kinterlock);
                } else {
                        rumpuser_cv_wait(lts.ucv, rump_giantlock);
                }
                rv = 0;
        }

        rumpuser_mutex_enter_nowrap(qlock);
        LIST_REMOVE(&lts, entries);
        rumpuser_mutex_exit(qlock);

        if (kinterlock)
                cv_destroy(&lts.kcv);
        else
                rumpuser_cv_destroy(lts.ucv);

        return rv;
}

int
ltsleep(wchan_t ident, pri_t prio, const char *wmesg, int timo,
        volatile struct simplelock *slock)
{
        int rv, nlocks;

        if (slock)
                simple_unlock(slock);

        /*
         * Since we cannot use slock as the rumpuser interlock,
         * require that everyone using this prehistoric interface
         * is biglocked.  Wrap around the biglock and drop lockcnt,
         * but retain the rumpuser mutex so that we can use it as an
         * interlock to rumpuser_cv_wait().
         */
        rump_kernel_bigwrap(&nlocks);
        rv = sleeper(ident, timo, NULL);
        rump_kernel_bigunwrap(nlocks);

        if (slock && (prio & PNORELOCK) == 0)
                simple_lock(slock);

        return rv;
}

int
mtsleep(wchan_t ident, pri_t prio, const char *wmesg, int timo,
        kmutex_t *lock)
{
        int rv;

        rv = sleeper(ident, timo, lock);
        if (prio & PNORELOCK)
                mutex_exit(lock);

        return rv;
}

static void
do_wakeup(wchan_t ident, bool wakeup_all)
{
        struct ltsleeper *ltsp;

        rumpuser_mutex_enter_nowrap(qlock);
        LIST_FOREACH(ltsp, &sleepers, entries) {
                if (ltsp->id == ident) {
                        if (wakeup_all) {
                                if (ltsp->iskwait) {
                                        cv_broadcast(&ltsp->kcv);
                                } else {
                                        rumpuser_cv_broadcast(ltsp->ucv);
                                }
                        } else {
                                if (ltsp->iskwait) {
                                        cv_signal(&ltsp->kcv);
                                } else {
                                        rumpuser_cv_signal(ltsp->ucv);
                                }
                        }
                }
        }
        rumpuser_mutex_exit(qlock);
}

void
wakeup(wchan_t ident)
{

        do_wakeup(ident, true);
}

void
wakeup_one(wchan_t ident)
{

        do_wakeup(ident, false);
}

void
rump_tsleep_init()
{

        rumpuser_mutex_init(&qlock);
}
210