/*	$NetBSD: intr.c,v 1.8.2.2 2009/03/03 18:34:07 skrll Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.8.2.2 2009/03/03 18:34:07 skrll Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/intr.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Interrupt simulator.  It executes hardclock() and softintrs.
 */
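
/*
 * Two kinds of threads are used when rump_threads is enabled: a clock
 * thread ("rumpclk"), which runs callout_hardclock() and maintains
 * rump_clock, and a small pool of soft interrupt workers ("rumpsi"),
 * which execute handlers queued via softint_schedule().
 */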

time_t time_uptime = 0;

struct softint {
	void (*si_func)(void *);	/* handler */
	void *si_arg;			/* argument passed to si_func */
	bool si_onlist;			/* already queued on si_pending? */
	bool si_mpsafe;			/* may run without the kernel lock */

	LIST_ENTRY(softint) si_entries;
};
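
/*
 * Soft interrupts waiting for execution.  The list, the si_onlist
 * flags and the worker counters below are all protected by si_mtx;
 * si_cv is signalled when new work is queued.
 */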
static LIST_HEAD(, softint) si_pending = LIST_HEAD_INITIALIZER(si_pending);
static kmutex_t si_mtx;
static kcondvar_t si_cv;

#define INTRTHREAD_DEFAULT	2
#define INTRTHREAD_MAX		20
static int wrkidle, wrktotal;

static void sithread(void *);

static void
makeworker(bool bootstrap)
{
	int rv;

	if (wrktotal >= INTRTHREAD_MAX) {
		/* XXX: ratecheck */
		printf("maximum interrupt threads (%d) reached\n",
		    INTRTHREAD_MAX);
		return;
	}
	rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_INTR, NULL,
	    sithread, NULL, NULL, "rumpsi");
	if (rv) {
		if (bootstrap)
			panic("intr thread creation failed %d", rv);
		else
			printf("intr thread creation failed %d\n", rv);
	} else {
		wrkidle++;
		wrktotal++;
	}
}

/* rumpuser structures since we call rumpuser interfaces directly */
static struct rumpuser_cv *clockcv;
static struct rumpuser_mtx *clockmtx;
static struct timespec rump_clock;

void
rump_gettime(struct timespec *ts)
{
	struct timespec attempt;

	/* XXX: this is completely bogus */
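	/*
	 * Re-read rump_clock until two consecutive snapshots compare
	 * equal, as a crude guard against torn reads: doclock()
	 * advances the clock without any locking against readers.
	 */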
	do {
		attempt = rump_clock;
	} while (memcmp(&attempt, &rump_clock, sizeof(struct timespec)) != 0);

	*ts = attempt;
}

/*
 * clock "interrupt"
 */
static void
doclock(void *noarg)
{
	struct timespec tick;
	uint64_t sec, nsec;
	static int ticks = 0;
	extern int hz;
	int error;

	rumpuser_gettime(&sec, &nsec, &error);
	rump_clock.tv_sec = sec;
	rump_clock.tv_nsec = nsec;
	tick.tv_sec = 0;
	tick.tv_nsec = 1000000000/hz;

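	/*
	 * rump_clock now holds a valid time; wake up softint_init(),
	 * which sleeps on clockcv until the clock is usable.
	 */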
	rumpuser_mutex_enter(clockmtx);
	rumpuser_cv_signal(clockcv);

	for (;;) {
		callout_hardclock();

		if (++ticks == hz) {
			time_uptime++;
			ticks = 0;
		}

		/* wait until the next tick */
		while (rumpuser_cv_timedwait(clockcv, clockmtx,
		    &rump_clock) != EWOULDBLOCK)
			continue;
		timespecadd(&rump_clock, &tick, &rump_clock);
	}
}

/*
 * run a scheduled soft interrupt
 */
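
/*
 * Workers pull softints off si_pending one at a time.  When the last
 * idle worker picks up an entry, an additional worker is created so
 * that a blocking handler cannot stall the queue.  Handlers that were
 * not established with SOFTINT_MPSAFE run under the big kernel lock.
 */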
static void
sithread(void *arg)
{
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;

	mutex_enter(&si_mtx);
	for (;;) {
		if (!LIST_EMPTY(&si_pending)) {
			si = LIST_FIRST(&si_pending);
			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_mpsafe;

			si->si_onlist = false;
			LIST_REMOVE(si, si_entries);
		} else {
			cv_wait(&si_cv, &si_mtx);
			continue;
		}
		wrkidle--;
		if (__predict_false(wrkidle == 0))
			makeworker(false);
		mutex_exit(&si_mtx);

		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);

		mutex_enter(&si_mtx);
		wrkidle++;
	}
}

void
softint_init(struct cpu_info *ci)
{
	int rv;

	mutex_init(&si_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&si_cv, "intrw8"); /* cv of temporary w8ness */

	rumpuser_cv_init(&clockcv);
	rumpuser_mutex_init(&clockmtx);

	/* XXX: should have separate "wanttimer" control */
	if (rump_threads) {
		rumpuser_mutex_enter(clockmtx);
		rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, doclock,
		    NULL, NULL, "rumpclk");
		if (rv)
			panic("clock thread creation failed: %d", rv);
		mutex_enter(&si_mtx);
		while (wrktotal < INTRTHREAD_DEFAULT) {
			makeworker(true);
		}
		mutex_exit(&si_mtx);

		/* make sure we have a clocktime before returning */
		rumpuser_cv_wait(clockcv, clockmtx);
		rumpuser_mutex_exit(clockmtx);
	}
}

/*
 * Soft interrupts have two modes of operation.  If we are running
 * with thread support enabled, execution is deferred to a worker
 * thread; otherwise the handler is executed in place.  See
 * softint_schedule().
 *
 * As there is currently no clear concept of when a thread finishes
 * work (although rump_clear_curlwp() is close), simply execute all
 * softints in dedicated interrupt threads.  This is probably not the
 * most efficient method, but good enough for now.
 */
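
/*
 * Example usage (hypothetical consumer: the handler mydrv_softrx and
 * the softc pointer sc are made up for illustration).  A driver
 * establishes a handler once and then schedules it whenever it has
 * deferred work; softint_schedule() queues mydrv_softrx(sc) for a
 * worker thread, or runs it directly if rump_threads is off.  Of the
 * flags, only SOFTINT_MPSAFE is honoured here; level flags such as
 * SOFTINT_NET are accepted but otherwise ignored.
 *
 *	sc->sc_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *	    mydrv_softrx, sc);
 *	...
 *	softint_schedule(sc->sc_sih);
 */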
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	struct softint *si;

	si = kmem_alloc(sizeof(*si), KM_SLEEP);
	si->si_func = func;
	si->si_arg = arg;
	si->si_onlist = false;
	si->si_mpsafe = flags & SOFTINT_MPSAFE;

	return si;
}

void
softint_schedule(void *arg)
{
	struct softint *si = arg;

	if (!rump_threads) {
		si->si_func(si->si_arg);
	} else {
		mutex_enter(&si_mtx);
		if (!si->si_onlist) {
			LIST_INSERT_HEAD(&si_pending, si, si_entries);
			si->si_onlist = true;
		}
		cv_signal(&si_cv);
		mutex_exit(&si_mtx);
	}
}

bool
cpu_intr_p(void)
{

	return false;
}