/* $NetBSD: rumpblk.c,v 1.1.4.4 2009/04/28 07:37:51 skrll Exp $ */

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Block device emulation.  Presents a block device interface and
 * uses rumpuser system calls to satisfy I/O requests.
 *
 * We provide fault injection: the driver can be made to fail a
 * configurable fraction of I/O requests.
 *
 * The driver also provides an optimization for regular files by
 * using memory-mapped I/O.  This avoids a host system call for every
 * I/O operation and gives finer-grained control over when and how
 * data is flushed.  Additionally, should the rump kernel dump core,
 * far less of the backing image is damaged.
 */
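
/*
 * Configuration note (illustrative only; the exact client invocation is
 * not part of this file): rumpblk_init() below reads RUMP_BLKFAIL,
 * RUMP_BLKFAIL_SEED, RUMP_BLKWINSIZE and RUMP_BLKWINCOUNT from the host
 * environment, so a hypothetical rump client could be started e.g. as
 *
 *	RUMP_BLKFAIL=100 RUMP_BLKFAIL_SEED=1234 \
 *	    RUMP_BLKWINSIZE=1048576 RUMP_BLKWINCOUNT=16 ./client
 *
 * to fail roughly 100 out of every 10000 I/Os in a reproducible pattern
 * while using 16 x 1MB mmap windows.  "./client" is a placeholder name.
 */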

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rumpblk.c,v 1.1.4.4 2009/04/28 07:37:51 skrll Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/disklabel.h>
#include <sys/evcnt.h>
#include <sys/fcntl.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/stat.h>

#include <rump/rumpuser.h>

#include "rump_private.h"
#include "rump_vfs_private.h"

#if 0
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif

/* Default: 16 x 1MB windows */
unsigned memwinsize = (1<<20);
unsigned memwincnt = 16;

#define STARTWIN(off)		((off) & ~((off_t)memwinsize-1))
#define INWIN(win,off)		((win)->win_off == STARTWIN(off))
#define WINSIZE(rblk, win)	(MIN((rblk->rblk_size - win->win_off), memwinsize))
#define WINVALID(win)		((win)->win_off != (off_t)-1)
#define WINVALIDATE(win)	((win)->win_off = (off_t)-1)
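
/*
 * Worked example of the window arithmetic (using the default 1MB
 * window size): for off = 0x123456, STARTWIN(off) = 0x100000, so a
 * window with win_off == 0x100000 satisfies INWIN(win, off), and the
 * access lands at byte offset off - win_off = 0x23456 within win_mem.
 */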
struct blkwin {
        off_t win_off;
        void *win_mem;
        int win_refcnt;

        TAILQ_ENTRY(blkwin) win_lru;
};

#define RUMPBLK_SIZE 16
static struct rblkdev {
        char *rblk_path;
        int rblk_fd;

        /* for mmap */
        int rblk_mmflags;
        kmutex_t rblk_memmtx;
        kcondvar_t rblk_memcv;
        TAILQ_HEAD(winlru, blkwin) rblk_lruq;
        size_t rblk_size;
        bool rblk_waiting;

        struct partition *rblk_curpi;
        struct partition rblk_pi;
        struct disklabel rblk_dl;
} minors[RUMPBLK_SIZE];

static struct evcnt memblk_ev_reqs;
static struct evcnt memblk_ev_hits;
static struct evcnt memblk_ev_busy;

dev_type_open(rumpblk_open);
dev_type_close(rumpblk_close);
dev_type_read(rumpblk_read);
dev_type_write(rumpblk_write);
dev_type_ioctl(rumpblk_ioctl);
dev_type_strategy(rumpblk_strategy);
dev_type_strategy(rumpblk_strategy_fail);
dev_type_dump(rumpblk_dump);
dev_type_size(rumpblk_size);

static const struct bdevsw rumpblk_bdevsw = {
        rumpblk_open, rumpblk_close, rumpblk_strategy, rumpblk_ioctl,
        nodump, nosize, D_DISK
};

static const struct bdevsw rumpblk_bdevsw_fail = {
        rumpblk_open, rumpblk_close, rumpblk_strategy_fail, rumpblk_ioctl,
        nodump, nosize, D_DISK
};

static const struct cdevsw rumpblk_cdevsw = {
        rumpblk_open, rumpblk_close, rumpblk_read, rumpblk_write,
        rumpblk_ioctl, nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* fail every n out of BLKFAIL_MAX */
#define BLKFAIL_MAX 10000
static int blkfail;
static unsigned randstate;

static struct blkwin *
getwindow(struct rblkdev *rblk, off_t off, int *wsize, int *error)
{
        struct blkwin *win;

        mutex_enter(&rblk->rblk_memmtx);
        memblk_ev_reqs.ev_count++;
 retry:
        /* search for window */
        TAILQ_FOREACH(win, &rblk->rblk_lruq, win_lru) {
                if (INWIN(win, off) && WINVALID(win))
                        break;
        }

        /* found?  return */
        if (win) {
                memblk_ev_hits.ev_count++;
                TAILQ_REMOVE(&rblk->rblk_lruq, win, win_lru);
                goto good;
        }

        /*
         * Else, create new window.  If the least recently used is not
         * currently in use, reuse that.  Otherwise we need to wait.
         */
        win = TAILQ_LAST(&rblk->rblk_lruq, winlru);
        if (win->win_refcnt == 0) {
                TAILQ_REMOVE(&rblk->rblk_lruq, win, win_lru);
                mutex_exit(&rblk->rblk_memmtx);

                if (WINVALID(win)) {
                        DPRINTF(("win %p, unmap mem %p, off 0x%" PRIx64 "\n",
                            win, win->win_mem, win->win_off));
                        rumpuser_unmap(win->win_mem, WINSIZE(rblk, win));
                        WINVALIDATE(win);
                }

                win->win_off = STARTWIN(off);
                win->win_mem = rumpuser_filemmap(rblk->rblk_fd, win->win_off,
                    WINSIZE(rblk, win), rblk->rblk_mmflags, error);
                DPRINTF(("win %p, off 0x%" PRIx64 ", mem %p\n",
                    win, win->win_off, win->win_mem));

                mutex_enter(&rblk->rblk_memmtx);
                if (win->win_mem == NULL) {
                        WINVALIDATE(win);
                        TAILQ_INSERT_TAIL(&rblk->rblk_lruq, win, win_lru);
                        mutex_exit(&rblk->rblk_memmtx);
                        return NULL;
                }
        } else {
                DPRINTF(("memwin wait\n"));
                memblk_ev_busy.ev_count++;

                rblk->rblk_waiting = true;
                cv_wait(&rblk->rblk_memcv, &rblk->rblk_memmtx);
                goto retry;
        }

 good:
        KASSERT(win);
        win->win_refcnt++;
        TAILQ_INSERT_HEAD(&rblk->rblk_lruq, win, win_lru);
        mutex_exit(&rblk->rblk_memmtx);
        *wsize = MIN(*wsize, memwinsize - (off - win->win_off));
        KASSERT(*wsize);

        return win;
}

static void
putwindow(struct rblkdev *rblk, struct blkwin *win)
{

        mutex_enter(&rblk->rblk_memmtx);
        if (--win->win_refcnt == 0 && rblk->rblk_waiting) {
                rblk->rblk_waiting = false;
                cv_signal(&rblk->rblk_memcv);
        }
        KASSERT(win->win_refcnt >= 0);
        mutex_exit(&rblk->rblk_memmtx);
}
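
/*
 * getwindow() and putwindow() are used as a pair: dostrategy() below
 * maps each window-sized chunk of a request with getwindow(), copies
 * the data through win_mem, and then drops the reference with
 * putwindow() so that a thread blocked in getwindow() can recycle
 * the window.
 */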

static void
wincleanup(struct rblkdev *rblk)
{
        struct blkwin *win;

        while ((win = TAILQ_FIRST(&rblk->rblk_lruq)) != NULL) {
                TAILQ_REMOVE(&rblk->rblk_lruq, win, win_lru);
                if (WINVALID(win)) {
                        DPRINTF(("cleanup win %p addr %p\n",
                            win, win->win_mem));
                        rumpuser_unmap(win->win_mem, WINSIZE(rblk, win));
                }
                kmem_free(win, sizeof(*win));
        }
        rblk->rblk_mmflags = 0;
}

int
rumpblk_init(void)
{
        char buf[64];
        int rumpblk = RUMPBLK;
        unsigned tmp;
        int error, i;

        if (rumpuser_getenv("RUMP_BLKFAIL", buf, sizeof(buf), &error) == 0) {
                blkfail = strtoul(buf, NULL, 10);
                /* fail everything */
                if (blkfail > BLKFAIL_MAX)
                        blkfail = BLKFAIL_MAX;
                if (rumpuser_getenv("RUMP_BLKFAIL_SEED", buf, sizeof(buf),
                    &error) == 0) {
                        randstate = strtoul(buf, NULL, 10);
                } else {
                        randstate = arc4random(); /* XXX: not enough entropy */
                }
                printf("rumpblk: FAULT INJECTION ACTIVE!  every %d out of"
                    " %d I/Os will fail.  key %u\n", blkfail, BLKFAIL_MAX,
                    randstate);
        } else {
                blkfail = 0;
        }

        if (rumpuser_getenv("RUMP_BLKWINSIZE", buf, sizeof(buf), &error) == 0) {
                printf("rumpblk: ");
                tmp = strtoul(buf, NULL, 10);
                if (tmp && !(tmp & (tmp-1)))
                        memwinsize = tmp;
                else
                        printf("invalid RUMP_BLKWINSIZE %u, ", tmp);
                printf("using %u for memwinsize\n", memwinsize);
        }
        if (rumpuser_getenv("RUMP_BLKWINCOUNT", buf, sizeof(buf), &error) == 0) {
                printf("rumpblk: ");
                tmp = strtoul(buf, NULL, 10);
                if (tmp)
                        memwincnt = tmp;
                else
                        printf("invalid RUMP_BLKWINCOUNT %u, ", tmp);
                printf("using %u for memwincount\n", memwincnt);
        }

        memset(minors, 0, sizeof(minors));
        for (i = 0; i < RUMPBLK_SIZE; i++) {
                mutex_init(&minors[i].rblk_memmtx, MUTEX_DEFAULT, IPL_NONE);
                cv_init(&minors[i].rblk_memcv, "rblkmcv");
        }

        evcnt_attach_dynamic(&memblk_ev_reqs, EVCNT_TYPE_MISC, NULL,
            "rumpblk", "memblk requests");
        evcnt_attach_dynamic(&memblk_ev_hits, EVCNT_TYPE_MISC, NULL,
            "rumpblk", "memblk window hits");
        evcnt_attach_dynamic(&memblk_ev_busy, EVCNT_TYPE_MISC, NULL,
            "rumpblk", "memblk all windows busy");

        if (blkfail) {
                return devsw_attach("rumpblk", &rumpblk_bdevsw_fail, &rumpblk,
                    &rumpblk_cdevsw, &rumpblk);
        } else {
                return devsw_attach("rumpblk", &rumpblk_bdevsw, &rumpblk,
                    &rumpblk_cdevsw, &rumpblk);
        }
}

int
rumpblk_register(const char *path)
{
        size_t len;
        int i;

        for (i = 0; i < RUMPBLK_SIZE; i++)
                if (minors[i].rblk_path && strcmp(minors[i].rblk_path, path) == 0)
                        return i;

        for (i = 0; i < RUMPBLK_SIZE; i++)
                if (minors[i].rblk_path == NULL)
                        break;
        if (i == RUMPBLK_SIZE)
                return -1;

        len = strlen(path);
        minors[i].rblk_path = malloc(len + 1, M_TEMP, M_WAITOK);
        strcpy(minors[i].rblk_path, path);
        minors[i].rblk_fd = -1;
        return i;
}
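
/*
 * Usage sketch (illustrative only; the actual callers live outside this
 * file): a component that wants to expose a host file or device as a
 * block device would do something like
 *
 *	minornum = rumpblk_register("/path/to/image");
 *	dev = makedev(RUMPBLK, minornum);
 *
 * and then open dev through the usual bdevsw route (rumpblk_open).
 * The path shown is a placeholder.
 */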

int
rumpblk_open(dev_t dev, int flag, int fmt, struct lwp *l)
{
        struct rblkdev *rblk = &minors[minor(dev)];
        uint64_t fsize;
        int ft, dummy;
        int error, fd;

        KASSERT(rblk->rblk_fd == -1);
        fd = rumpuser_open(rblk->rblk_path, OFLAGS(flag), &error);
        if (error)
                return error;

        if (rumpuser_getfileinfo(rblk->rblk_path, &fsize, &ft, &error) == -1) {
                rumpuser_close(fd, &dummy);
                return error;
        }

        if (ft == RUMPUSER_FT_REG) {
                struct blkwin *win;
                int i, winsize;

                /*
                 * Use mmap to access a regular file.  Allocate and
                 * cache the initial windows here.  Failure to allocate
                 * one means we fall back to read/write I/O.
                 */

                rblk->rblk_mmflags = 0;
                if (flag & FREAD)
                        rblk->rblk_mmflags |= RUMPUSER_FILEMMAP_READ;
                if (flag & FWRITE) {
                        rblk->rblk_mmflags |= RUMPUSER_FILEMMAP_WRITE;
                        rblk->rblk_mmflags |= RUMPUSER_FILEMMAP_SHARED;
                }

                TAILQ_INIT(&rblk->rblk_lruq);
                rblk->rblk_size = fsize;
                rblk->rblk_fd = fd;

                for (i = 0; i < memwincnt && i * memwinsize < fsize; i++) {
                        win = kmem_zalloc(sizeof(*win), KM_SLEEP);
                        WINVALIDATE(win);
                        TAILQ_INSERT_TAIL(&rblk->rblk_lruq, win, win_lru);

                        /*
                         * Allocate the initial windows.  Here we just
                         * generally make sure that a) we can mmap at all
                         * and b) we have the necessary VA available.
                         */
                        winsize = 1;
                        win = getwindow(rblk, i*memwinsize, &winsize, &error);
                        if (win) {
                                putwindow(rblk, win);
                        } else {
                                wincleanup(rblk);
                                break;
                        }
                }

                memset(&rblk->rblk_dl, 0, sizeof(rblk->rblk_dl));
                rblk->rblk_pi.p_size = fsize >> DEV_BSHIFT;
                rblk->rblk_dl.d_secsize = DEV_BSIZE;
                rblk->rblk_curpi = &rblk->rblk_pi;
        } else {
                if (rumpuser_ioctl(fd, DIOCGDINFO, &rblk->rblk_dl,
                    &error) == -1) {
                        KASSERT(error);
                        rumpuser_close(fd, &dummy);
                        return error;
                }

                rblk->rblk_fd = fd;
                rblk->rblk_curpi = &rblk->rblk_dl.d_partitions[0];
        }

        KASSERT(rblk->rblk_fd != -1);
        return 0;
}

int
rumpblk_close(dev_t dev, int flag, int fmt, struct lwp *l)
{
        struct rblkdev *rblk = &minors[minor(dev)];
        int dummy;

        if (rblk->rblk_mmflags)
                wincleanup(rblk);
        rumpuser_fsync(rblk->rblk_fd, &dummy);
        rumpuser_close(rblk->rblk_fd, &dummy);
        rblk->rblk_fd = -1;

        return 0;
}

int
rumpblk_ioctl(dev_t dev, u_long xfer, void *addr, int flag, struct lwp *l)
{
        struct rblkdev *rblk = &minors[minor(dev)];
        int rv, error;

        if (xfer == DIOCGPART) {
                struct partinfo *pi = (struct partinfo *)addr;

                pi->part = rblk->rblk_curpi;
                pi->disklab = &rblk->rblk_dl;

                return 0;
        }

        rv = rumpuser_ioctl(rblk->rblk_fd, xfer, addr, &error);
        if (rv == -1)
                return error;

        return 0;
}

int
rumpblk_read(dev_t dev, struct uio *uio, int flags)
{

        panic("%s: unimplemented", __func__);
}

int
rumpblk_write(dev_t dev, struct uio *uio, int flags)
{

        panic("%s: unimplemented", __func__);
}

static void
dostrategy(struct buf *bp)
{
        struct rblkdev *rblk = &minors[minor(bp->b_dev)];
        off_t off;
        int async, error;

        off = bp->b_blkno << DEV_BSHIFT;
        /*
         * Do bounds checking if we're working on a file.  Otherwise
         * invalid file systems might attempt to read beyond EOF.  This
         * is bad(tm), especially on mmapped images.  This is essentially
         * what the kernel bounds_check() routines do.
         */
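        /*
         * Example of the clamp with illustrative numbers: for an image
         * with rblk_size = 0x100000 and a 0x10000-byte request at
         * off = 0xff000, sz = 0x100000 - 0xff000 = 0x1000, so the
         * request is truncated to the 0x1000 bytes remaining before EOF.
         */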
        if (rblk->rblk_size && off + bp->b_bcount > rblk->rblk_size) {
                int64_t sz = rblk->rblk_size - off;

                /* EOF */
                if (sz == 0) {
                        rump_biodone(bp, 0, 0);
                        return;
                }
                /* beyond EOF ==> error */
                if (sz < 0) {
                        rump_biodone(bp, 0, EINVAL);
                        return;
                }

                /* truncate to device size */
                bp->b_bcount = sz;
        }

        async = bp->b_flags & B_ASYNC;
        DPRINTF(("rumpblk_strategy: 0x%x bytes %s off 0x%" PRIx64
            " (0x%" PRIx64 " - 0x%" PRIx64 ")\n",
            bp->b_bcount, BUF_ISREAD(bp) ? "READ" : "WRITE",
            off, off, (off + bp->b_bcount)));

        /* mmap?  handle here and return */
        if (rblk->rblk_mmflags) {
                struct blkwin *win;
                int winsize, iodone;
                uint8_t *ioaddr, *bufaddr;

                for (iodone = 0; iodone < bp->b_bcount;
                    iodone += winsize, off += winsize) {
                        winsize = bp->b_bcount - iodone;
                        win = getwindow(rblk, off, &winsize, &error);
                        if (win == NULL) {
                                rump_biodone(bp, iodone, error);
                                return;
                        }

                        ioaddr = (uint8_t *)win->win_mem + (off - STARTWIN(off));
                        bufaddr = (uint8_t *)bp->b_data + iodone;

                        DPRINTF(("strat: %p off 0x%" PRIx64
                            ", ioaddr %p (%p)/buf %p\n", win,
                            win->win_off, ioaddr, win->win_mem, bufaddr));
                        if (BUF_ISREAD(bp)) {
                                memcpy(bufaddr, ioaddr, winsize);
                        } else {
                                memcpy(ioaddr, bufaddr, winsize);
                        }

                        /* synchronous write, sync bits back to disk */
                        if (BUF_ISWRITE(bp) && !async) {
                                rumpuser_memsync(ioaddr, winsize, &error);
                        }
                        putwindow(rblk, win);
                }

                rump_biodone(bp, bp->b_bcount, 0);
                return;
        }

        /*
         * Do I/O.  The path taken depends on whether the rump kernel is
         * running with threads.  With threads, the request is passed to
         * rumpuser, where it is executed asynchronously; the rumpuser
         * routine then calls biodone() to signal any waiters in the
         * kernel.  I/Os are executed in series.  Technically executing
         * them in parallel would produce better results, but then we'd
         * need either more threads or posix aio.  Maybe worth
         * investigating this later.
         *
         * Using bufq here might be a good idea.
         */
        if (rump_threads) {
                struct rumpuser_aio *rua;

                rumpuser_mutex_enter(&rumpuser_aio_mtx);
                while ((rumpuser_aio_head+1) % N_AIOS == rumpuser_aio_tail)
                        rumpuser_cv_wait(&rumpuser_aio_cv, &rumpuser_aio_mtx);

                rua = &rumpuser_aios[rumpuser_aio_head];
                KASSERT(rua->rua_bp == NULL);
                rua->rua_fd = rblk->rblk_fd;
                rua->rua_data = bp->b_data;
                rua->rua_dlen = bp->b_bcount;
                rua->rua_off = off;
                rua->rua_bp = bp;
                rua->rua_op = BUF_ISREAD(bp);

                /* insert into queue & signal */
                rumpuser_aio_head = (rumpuser_aio_head+1) % N_AIOS;
                rumpuser_cv_signal(&rumpuser_aio_cv);
                rumpuser_mutex_exit(&rumpuser_aio_mtx);

                /* make sure non-async writes end up on backing media */
                if (BUF_ISWRITE(bp) && !async) {
                        biowait(bp);
                        rumpuser_fsync(rblk->rblk_fd, &error);
                }
        } else {
                if (BUF_ISREAD(bp)) {
                        rumpuser_read_bio(rblk->rblk_fd, bp->b_data,
                            bp->b_bcount, off, rump_biodone, bp);
                } else {
                        rumpuser_write_bio(rblk->rblk_fd, bp->b_data,
                            bp->b_bcount, off, rump_biodone, bp);
                }
                if (!async) {
                        if (BUF_ISWRITE(bp))
                                rumpuser_fsync(rblk->rblk_fd, &error);
                }
        }
}

void
rumpblk_strategy(struct buf *bp)
{

        dostrategy(bp);
}

/*
 * Simple random number generator.  This is private so that we can
 * very repeatably control which blocks will fail.
 *
 * <mlelstv> pooka, rand()
 * <mlelstv> [paste]
 */
static unsigned
gimmerand(void)
{

        return (randstate = randstate * 1103515245 + 12345) % (0x80000000L);
}
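
/*
 * Note: the constants above are the classic C library rand() LCG
 * (multiplier 1103515245, increment 12345), reduced modulo 2^31, so
 * runs started with the same RUMP_BLKFAIL_SEED fail exactly the same
 * sequence of blocks.
 */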

/*
 * Block device with very simple fault injection.  Fails n out of every
 * BLKFAIL_MAX I/Os with EIO, where n is determined by the environment
 * variable RUMP_BLKFAIL.
 */
void
rumpblk_strategy_fail(struct buf *bp)
{

        if (gimmerand() % BLKFAIL_MAX >= blkfail) {
                dostrategy(bp);
        } else {
                printf("block fault injection: failing I/O on block %lld\n",
                    (long long)bp->b_blkno);
                bp->b_error = EIO;
                biodone(bp);
        }
}