/*	$NetBSD: ltsleep.c,v 1.28.12.1 2012/02/18 07:35:46 mrg Exp $	*/

/*
 * Copyright (c) 2009, 2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of the tsleep/mtsleep kernel sleep interface.  There
 * are two sides to our implementation.  For historic spinlocks we
 * assume the kernel is giantlocked and use the kernel giantlock as the
 * wait interlock.  For mtsleep, we use the interlock supplied by
 * the caller.  This duality leads to some if/else messiness in the code ...
 */
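
/*
 * Typical caller-side usage, for orientation.  This is an illustrative
 * sketch only; "sc", "sc_ready" and "sc_lock" are hypothetical caller
 * names, not part of this file.  mtsleep() returns EWOULDBLOCK if the
 * timeout (here hz ticks, i.e. about one second) expires first:
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		error = mtsleep(&sc->sc_ready, PRIBIO, "scwait", hz,
 *		    &sc->sc_lock);
 *		if (error == EWOULDBLOCK)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 *
 * The waking side sets the condition and calls wakeup() on the same
 * wait channel:
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_ready = true;
 *	mutex_exit(&sc->sc_lock);
 *	wakeup(&sc->sc_ready);
 */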

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ltsleep.c,v 1.28.12.1 2012/02/18 07:35:46 mrg Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

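/*
 * One ltsleeper lives on the stack of each sleeping thread and is
 * linked onto the global sleepers list so that wakeup() can find it
 * by its wait channel.  Depending on the caller we wait either on a
 * kernel condvar (mtsleep, caller-supplied interlock) or on a
 * rumpuser condvar (tsleep, kernel giantlock as the interlock).
 */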
struct ltsleeper {
	wchan_t id;
	union {
		struct rumpuser_cv *user;
		kcondvar_t kern;
	} u;
	bool iskwait;
	LIST_ENTRY(ltsleeper) entries;
};
#define ucv u.user
#define kcv u.kern

static LIST_HEAD(, ltsleeper) sleepers = LIST_HEAD_INITIALIZER(sleepers);
static struct rumpuser_mtx *qlock;

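/*
 * Common sleep routine for tsleep() and mtsleep(): register on the
 * sleepers list, block on the appropriate condvar until woken up or
 * until the timeout expires, then unregister.  Returns 0 on wakeup
 * and EWOULDBLOCK if the wait timed out.
 */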
static int
sleeper(wchan_t ident, int timo, kmutex_t *kinterlock)
{
	struct ltsleeper lts;
	struct timespec ts, ticks;
	int rv;

	lts.id = ident;
	if (kinterlock) {
		lts.iskwait = true;
		cv_init(&lts.kcv, "mtsleep");
	} else {
		lts.iskwait = false;
		rumpuser_cv_init(&lts.ucv);
	}

	rumpuser_mutex_enter_nowrap(qlock);
	LIST_INSERT_HEAD(&sleepers, &lts, entries);
	rumpuser_mutex_exit(qlock);

	if (timo) {
		if (kinterlock) {
			rv = cv_timedwait(&lts.kcv, kinterlock, timo);
		} else {
			/*
			 * Calculate wakeup-time.
			 * XXX: should assert nanotime() does not block,
			 * i.e. yield the cpu and/or biglock.
			 */
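			/* e.g. with hz = 100, timo = 150 gives 1s + 500000000ns */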
			ticks.tv_sec = timo / hz;
			ticks.tv_nsec = (timo % hz) * (1000000000/hz);
			nanotime(&ts);
			timespecadd(&ts, &ticks, &ts);

			rv = rumpuser_cv_timedwait(lts.ucv, rump_giantlock,
			    ts.tv_sec, ts.tv_nsec);
		}

		if (rv != 0)
			rv = EWOULDBLOCK;
	} else {
		if (kinterlock) {
			cv_wait(&lts.kcv, kinterlock);
		} else {
			rumpuser_cv_wait(lts.ucv, rump_giantlock);
		}
		rv = 0;
	}

	rumpuser_mutex_enter_nowrap(qlock);
	LIST_REMOVE(&lts, entries);
	rumpuser_mutex_exit(qlock);

	if (kinterlock)
		cv_destroy(&lts.kcv);
	else
		rumpuser_cv_destroy(lts.ucv);

	return rv;
}

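/*
 * Sleep on a wait channel with the kernel biglock acting as the
 * interlock; the caller must be biglocked.
 */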
int
tsleep(wchan_t ident, pri_t prio, const char *wmesg, int timo)
{
	int rv, nlocks;

	/*
	 * Since we cannot use slock as the rumpuser interlock,
	 * require that everyone using this prehistoric interface
	 * is biglocked.  Wrap around the biglock and drop lockcnt,
	 * but retain the rumpuser mutex so that we can use it as an
	 * interlock to rumpuser_cv_wait().
	 */
	rump_kernel_bigwrap(&nlocks);
	rv = sleeper(ident, timo, NULL);
	rump_kernel_bigunwrap(nlocks);

	return rv;
}

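/*
 * Sleep using the caller-supplied kmutex as the interlock.  The lock
 * is held again when we return, unless PNORELOCK was passed in prio,
 * in which case it is released before returning.
 */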
int
mtsleep(wchan_t ident, pri_t prio, const char *wmesg, int timo, kmutex_t *lock)
{
	int rv;

	rv = sleeper(ident, timo, lock);
	if (prio & PNORELOCK)
		mutex_exit(lock);

	return rv;
}

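/*
 * Wake up all threads sleeping on the given wait channel.  We always
 * broadcast, since several threads may be sleeping on the same ident.
 */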
void
wakeup(wchan_t ident)
{
	struct ltsleeper *ltsp;

	rumpuser_mutex_enter_nowrap(qlock);
	LIST_FOREACH(ltsp, &sleepers, entries) {
		if (ltsp->id == ident) {
			if (ltsp->iskwait) {
				cv_broadcast(&ltsp->kcv);
			} else {
				rumpuser_cv_broadcast(ltsp->ucv);
			}
		}
	}
	rumpuser_mutex_exit(qlock);
}

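/*
 * Initialize the mutex protecting the global list of sleepers.
 */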
void
rump_tsleep_init()
{

	rumpuser_mutex_init(&qlock);
}