/*	$NetBSD: rumpblk.c,v 1.1.4.4 2009/04/28 07:37:51 skrll Exp $	*/

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Block device emulation.  Presents a block device interface and
 * uses rumpuser system calls to satisfy I/O requests.
 *
 * We provide fault injection.  The driver can be made to fail
 * I/O requests with a configurable probability.
 *
 * The driver also provides an optimization for regular files by
 * using memory-mapped I/O.  This avoids a host system call for
 * every I/O operation.  It also gives finer-grained control over
 * how to flush data.  Additionally, in case the rump kernel dumps
 * core, we get way less carnage.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rumpblk.c,v 1.1.4.4 2009/04/28 07:37:51 skrll Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/disklabel.h>
#include <sys/evcnt.h>
#include <sys/fcntl.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/stat.h>

#include <rump/rumpuser.h>

#include "rump_private.h"
#include "rump_vfs_private.h"

#if 0
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif

/* Default: 16 x 1MB windows */
unsigned memwinsize = (1<<20);
unsigned memwincnt = 16;

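/*
 * Window bookkeeping helpers.  The backing file is accessed through
 * memwincnt mmap windows of memwinsize bytes each (memwinsize must be
 * a power of two).  STARTWIN() rounds an I/O offset down to the start
 * of its window, INWIN() tests whether an offset falls within a given
 * window, and WINSIZE() clamps a window to the end of the backing
 * file.  A window whose win_off is -1 is currently unmapped.
 */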
#define STARTWIN(off)		((off) & ~(memwinsize-1))
#define INWIN(win,off)		((win)->win_off == STARTWIN(off))
#define WINSIZE(rblk, win)	(MIN((rblk->rblk_size-win->win_off), memwinsize))
#define WINVALID(win)		((win)->win_off != (off_t)-1)
#define WINVALIDATE(win)	((win)->win_off = (off_t)-1)
struct blkwin {
	off_t win_off;
	void *win_mem;
	int win_refcnt;

	TAILQ_ENTRY(blkwin) win_lru;
};

#define RUMPBLK_SIZE 16
static struct rblkdev {
	char *rblk_path;
	int rblk_fd;

	/* for mmap */
	int rblk_mmflags;
	kmutex_t rblk_memmtx;
	kcondvar_t rblk_memcv;
	TAILQ_HEAD(winlru, blkwin) rblk_lruq;
	size_t rblk_size;
	bool rblk_waiting;

	struct partition *rblk_curpi;
	struct partition rblk_pi;
	struct disklabel rblk_dl;
} minors[RUMPBLK_SIZE];

static struct evcnt memblk_ev_reqs;
static struct evcnt memblk_ev_hits;
static struct evcnt memblk_ev_busy;

dev_type_open(rumpblk_open);
dev_type_close(rumpblk_close);
dev_type_read(rumpblk_read);
dev_type_write(rumpblk_write);
dev_type_ioctl(rumpblk_ioctl);
dev_type_strategy(rumpblk_strategy);
dev_type_strategy(rumpblk_strategy_fail);
dev_type_dump(rumpblk_dump);
dev_type_size(rumpblk_size);

static const struct bdevsw rumpblk_bdevsw = {
	rumpblk_open, rumpblk_close, rumpblk_strategy, rumpblk_ioctl,
	nodump, nosize, D_DISK
};

static const struct bdevsw rumpblk_bdevsw_fail = {
	rumpblk_open, rumpblk_close, rumpblk_strategy_fail, rumpblk_ioctl,
	nodump, nosize, D_DISK
};

static const struct cdevsw rumpblk_cdevsw = {
	rumpblk_open, rumpblk_close, rumpblk_read, rumpblk_write,
	rumpblk_ioctl, nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* fail every n out of BLKFAIL_MAX */
#define BLKFAIL_MAX 10000
static int blkfail;
static unsigned randstate;

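/*
 * Get a memory window covering the given offset, mapping one if
 * necessary.  On success the window is returned with a reference
 * held and *wsize is clamped to the number of bytes usable within
 * the window.  On failure NULL is returned with *error set by the
 * mmap attempt.
 */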
static struct blkwin *
getwindow(struct rblkdev *rblk, off_t off, int *wsize, int *error)
{
	struct blkwin *win;

	mutex_enter(&rblk->rblk_memmtx);
	memblk_ev_reqs.ev_count++;
 retry:
	/* search for window */
	TAILQ_FOREACH(win, &rblk->rblk_lruq, win_lru) {
		if (INWIN(win, off) && WINVALID(win))
			break;
	}

	/* found?  return */
	if (win) {
		memblk_ev_hits.ev_count++;
		TAILQ_REMOVE(&rblk->rblk_lruq, win, win_lru);
		goto good;
	}

	/*
	 * Else, create new window.  If the least recently used is not
	 * currently in use, reuse that.  Otherwise we need to wait.
	 */
	win = TAILQ_LAST(&rblk->rblk_lruq, winlru);
	if (win->win_refcnt == 0) {
		TAILQ_REMOVE(&rblk->rblk_lruq, win, win_lru);
		mutex_exit(&rblk->rblk_memmtx);

		if (WINVALID(win)) {
			DPRINTF(("win %p, unmap mem %p, off 0x%" PRIx64 "\n",
			    win, win->win_mem, win->win_off));
			rumpuser_unmap(win->win_mem, WINSIZE(rblk, win));
			WINVALIDATE(win);
		}

		win->win_off = STARTWIN(off);
		win->win_mem = rumpuser_filemmap(rblk->rblk_fd, win->win_off,
		    WINSIZE(rblk, win), rblk->rblk_mmflags, error);
		DPRINTF(("win %p, off 0x%" PRIx64 ", mem %p\n",
		    win, win->win_off, win->win_mem));

		mutex_enter(&rblk->rblk_memmtx);
		if (win->win_mem == NULL) {
			WINVALIDATE(win);
			TAILQ_INSERT_TAIL(&rblk->rblk_lruq, win, win_lru);
			mutex_exit(&rblk->rblk_memmtx);
			return NULL;
		}
	} else {
		DPRINTF(("memwin wait\n"));
		memblk_ev_busy.ev_count++;

		rblk->rblk_waiting = true;
		cv_wait(&rblk->rblk_memcv, &rblk->rblk_memmtx);
		goto retry;
	}

 good:
	KASSERT(win);
	win->win_refcnt++;
	TAILQ_INSERT_HEAD(&rblk->rblk_lruq, win, win_lru);
	mutex_exit(&rblk->rblk_memmtx);
	*wsize = MIN(*wsize, memwinsize - (off-win->win_off));
	KASSERT(*wsize);

	return win;
}

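/*
 * Release a reference to a window and wake up a waiter, if any.
 */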
static void
putwindow(struct rblkdev *rblk, struct blkwin *win)
{

	mutex_enter(&rblk->rblk_memmtx);
	if (--win->win_refcnt == 0 && rblk->rblk_waiting) {
		rblk->rblk_waiting = false;
		cv_signal(&rblk->rblk_memcv);
	}
	KASSERT(win->win_refcnt >= 0);
	mutex_exit(&rblk->rblk_memmtx);
}

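/*
 * Unmap and free all windows and disable mmap I/O for the device.
 */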
static void
wincleanup(struct rblkdev *rblk)
{
	struct blkwin *win;

	while ((win = TAILQ_FIRST(&rblk->rblk_lruq)) != NULL) {
		TAILQ_REMOVE(&rblk->rblk_lruq, win, win_lru);
		if (WINVALID(win)) {
			DPRINTF(("cleanup win %p addr %p\n",
			    win, win->win_mem));
			rumpuser_unmap(win->win_mem, WINSIZE(rblk, win));
		}
		kmem_free(win, sizeof(*win));
	}
	rblk->rblk_mmflags = 0;
}

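/*
 * Initialize the driver: process the RUMP_BLKFAIL, RUMP_BLKFAIL_SEED,
 * RUMP_BLKWINSIZE and RUMP_BLKWINCOUNT environment variables, set up
 * per-minor locks, and attach the device switch entries.
 */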
int
rumpblk_init(void)
{
	char buf[64];
	int rumpblk = RUMPBLK;
	unsigned tmp;
	int error, i;

	if (rumpuser_getenv("RUMP_BLKFAIL", buf, sizeof(buf), &error) == 0) {
		blkfail = strtoul(buf, NULL, 10);
		/* clamp: larger values mean fail every I/O */
		if (blkfail > BLKFAIL_MAX)
			blkfail = BLKFAIL_MAX;
		if (rumpuser_getenv("RUMP_BLKFAIL_SEED", buf, sizeof(buf),
		    &error) == 0) {
			randstate = strtoul(buf, NULL, 10);
		} else {
			randstate = arc4random(); /* XXX: not enough entropy */
		}
		printf("rumpblk: FAULT INJECTION ACTIVE! %d out of every"
		    " %d I/Os will fail. key %u\n", blkfail, BLKFAIL_MAX,
		    randstate);
	} else {
		blkfail = 0;
	}

	if (rumpuser_getenv("RUMP_BLKWINSIZE", buf, sizeof(buf), &error) == 0) {
		printf("rumpblk: ");
		tmp = strtoul(buf, NULL, 10);
		if (tmp && !(tmp & (tmp-1)))
			memwinsize = tmp;
		else
			printf("invalid RUMP_BLKWINSIZE %u, ", tmp);
		printf("using %u for memwinsize\n", memwinsize);
	}
	if (rumpuser_getenv("RUMP_BLKWINCOUNT", buf, sizeof(buf), &error) == 0) {
		printf("rumpblk: ");
		tmp = strtoul(buf, NULL, 10);
		if (tmp)
			memwincnt = tmp;
		else
			printf("invalid RUMP_BLKWINCOUNT %u, ", tmp);
		printf("using %u for memwincnt\n", memwincnt);
	}

	memset(minors, 0, sizeof(minors));
	for (i = 0; i < RUMPBLK_SIZE; i++) {
		mutex_init(&minors[i].rblk_memmtx, MUTEX_DEFAULT, IPL_NONE);
		cv_init(&minors[i].rblk_memcv, "rblkmcv");
	}

	evcnt_attach_dynamic(&memblk_ev_reqs, EVCNT_TYPE_MISC, NULL,
	    "rumpblk", "memblk requests");
	evcnt_attach_dynamic(&memblk_ev_hits, EVCNT_TYPE_MISC, NULL,
	    "rumpblk", "memblk window hits");
	evcnt_attach_dynamic(&memblk_ev_busy, EVCNT_TYPE_MISC, NULL,
	    "rumpblk", "memblk all windows busy");

	if (blkfail) {
		return devsw_attach("rumpblk", &rumpblk_bdevsw_fail, &rumpblk,
		    &rumpblk_cdevsw, &rumpblk);
	} else {
		return devsw_attach("rumpblk", &rumpblk_bdevsw, &rumpblk,
		    &rumpblk_cdevsw, &rumpblk);
	}
}

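/*
 * Register a host file as a rumpblk device.  Returns the minor number
 * to use for the device, or -1 if the device table is full.
 * Registering the same path twice returns the existing minor.
 */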
int
rumpblk_register(const char *path)
{
	size_t len;
	int i;

	for (i = 0; i < RUMPBLK_SIZE; i++)
		if (minors[i].rblk_path && strcmp(minors[i].rblk_path, path)==0)
			return i;

	for (i = 0; i < RUMPBLK_SIZE; i++)
		if (minors[i].rblk_path == NULL)
			break;
	if (i == RUMPBLK_SIZE)
		return -1;

	len = strlen(path);
	minors[i].rblk_path = malloc(len + 1, M_TEMP, M_WAITOK);
	strcpy(minors[i].rblk_path, path);
	minors[i].rblk_fd = -1;
	return i;
}
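
/*
 * A minimal usage sketch (hypothetical, not part of this driver):
 * a rump component registers a backing file and opens the resulting
 * block device through the regular device switch.  The image path
 * is illustrative.
 *
 *	int mi, error;
 *
 *	mi = rumpblk_register("/tmp/disk.img");
 *	if (mi == -1)
 *		panic("out of rumpblk minors");
 *	error = bdev_open(makedev(RUMPBLK, mi), FREAD|FWRITE,
 *	    S_IFBLK, curlwp);
 */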
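
/*
 * rumpblk_open: open the backing host file or device.  Regular files
 * are set up for memory mapped I/O; other backends are accessed with
 * read/write and queried for a disklabel.
 */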
int
rumpblk_open(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct rblkdev *rblk = &minors[minor(dev)];
	uint64_t fsize;
	int ft, dummy;
	int error, fd;

	KASSERT(rblk->rblk_fd == -1);
	fd = rumpuser_open(rblk->rblk_path, OFLAGS(flag), &error);
	if (error)
		return error;

	if (rumpuser_getfileinfo(rblk->rblk_path, &fsize, &ft, &error) == -1) {
		rumpuser_close(fd, &dummy);
		return error;
	}

	if (ft == RUMPUSER_FT_REG) {
		struct blkwin *win;
		int i, winsize;

		/*
		 * Use mmap to access a regular file.  Allocate and
		 * cache the initial windows here.  Failure to allocate
		 * a window means we fall back to read/write I/O.
		 */

		rblk->rblk_mmflags = 0;
		if (flag & FREAD)
			rblk->rblk_mmflags |= RUMPUSER_FILEMMAP_READ;
		if (flag & FWRITE) {
			rblk->rblk_mmflags |= RUMPUSER_FILEMMAP_WRITE;
			rblk->rblk_mmflags |= RUMPUSER_FILEMMAP_SHARED;
		}

		TAILQ_INIT(&rblk->rblk_lruq);
		rblk->rblk_size = fsize;
		rblk->rblk_fd = fd;

		for (i = 0; i < memwincnt && i * memwinsize < fsize; i++) {
			win = kmem_zalloc(sizeof(*win), KM_SLEEP);
			WINVALIDATE(win);
			TAILQ_INSERT_TAIL(&rblk->rblk_lruq, win, win_lru);

			/*
			 * Map the initial windows.  This just makes
			 * sure that a) we can mmap at all and b) the
			 * necessary virtual address space is available.
			 */
			winsize = 1;
			win = getwindow(rblk, i*memwinsize, &winsize, &error);
			if (win) {
				putwindow(rblk, win);
			} else {
				wincleanup(rblk);
				break;
			}
		}

		memset(&rblk->rblk_dl, 0, sizeof(rblk->rblk_dl));
		rblk->rblk_pi.p_size = fsize >> DEV_BSHIFT;
		rblk->rblk_dl.d_secsize = DEV_BSIZE;
		rblk->rblk_curpi = &rblk->rblk_pi;
	} else {
		if (rumpuser_ioctl(fd, DIOCGDINFO, &rblk->rblk_dl,
		    &error) == -1) {
			KASSERT(error);
			rumpuser_close(fd, &dummy);
			return error;
		}

		rblk->rblk_fd = fd;
		rblk->rblk_curpi = &rblk->rblk_dl.d_partitions[0];
	}

	KASSERT(rblk->rblk_fd != -1);
	return 0;
}

int
rumpblk_close(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct rblkdev *rblk = &minors[minor(dev)];
	int dummy;

	if (rblk->rblk_mmflags)
		wincleanup(rblk);
	rumpuser_fsync(rblk->rblk_fd, &dummy);
	rumpuser_close(rblk->rblk_fd, &dummy);
	rblk->rblk_fd = -1;

	return 0;
}

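/*
 * rumpblk_ioctl: answer DIOCGPART from cached partition information;
 * pass all other requests through to the host file descriptor.
 */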
int
rumpblk_ioctl(dev_t dev, u_long xfer, void *addr, int flag, struct lwp *l)
{
	struct rblkdev *rblk = &minors[minor(dev)];
	int rv, error;

	if (xfer == DIOCGPART) {
		struct partinfo *pi = (struct partinfo *)addr;

		pi->part = rblk->rblk_curpi;
		pi->disklab = &rblk->rblk_dl;

		return 0;
	}

	rv = rumpuser_ioctl(rblk->rblk_fd, xfer, addr, &error);
	if (rv == -1)
		return error;

	return 0;
}

int
rumpblk_read(dev_t dev, struct uio *uio, int flags)
{

	panic("%s: unimplemented", __func__);
}

int
rumpblk_write(dev_t dev, struct uio *uio, int flags)
{

	panic("%s: unimplemented", __func__);
}

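/*
 * dostrategy: execute one I/O request.  The request is satisfied
 * either by copying through a memory window (mmapped regular files)
 * or by handing it to the rumpuser I/O routines.
 */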
static void
dostrategy(struct buf *bp)
{
	struct rblkdev *rblk = &minors[minor(bp->b_dev)];
	off_t off;
	int async, error;

	off = bp->b_blkno << DEV_BSHIFT;
	/*
	 * Do bounds checking if we're working on a file.  Otherwise
	 * invalid file systems might attempt to read beyond EOF.  This
	 * is bad(tm) especially on mmapped images.  This is essentially
	 * what the kernel bounds_check routines do.
	 */
	if (rblk->rblk_size && off + bp->b_bcount > rblk->rblk_size) {
		int64_t sz = rblk->rblk_size - off;

		/* EOF */
		if (sz == 0) {
			rump_biodone(bp, 0, 0);
			return;
		}
		/* beyond EOF ==> error */
		if (sz < 0) {
			rump_biodone(bp, 0, EINVAL);
			return;
		}

		/* truncate to device size */
		bp->b_bcount = sz;
	}

	async = bp->b_flags & B_ASYNC;
	DPRINTF(("rumpblk_strategy: 0x%x bytes %s off 0x%" PRIx64
	    " (0x%" PRIx64 " - 0x%" PRIx64 ")\n",
	    bp->b_bcount, BUF_ISREAD(bp) ? "READ" : "WRITE",
	    off, off, (off + bp->b_bcount)));

	/* mmap?  handle here and return */
	if (rblk->rblk_mmflags) {
		struct blkwin *win;
		int winsize, iodone;
		uint8_t *ioaddr, *bufaddr;

		for (iodone = 0; iodone < bp->b_bcount;
		    iodone += winsize, off += winsize) {
			winsize = bp->b_bcount - iodone;
			win = getwindow(rblk, off, &winsize, &error);
			if (win == NULL) {
				rump_biodone(bp, iodone, error);
				return;
			}

			ioaddr = (uint8_t *)win->win_mem + (off-STARTWIN(off));
			bufaddr = (uint8_t *)bp->b_data + iodone;

			DPRINTF(("strat: %p off 0x%" PRIx64
			    ", ioaddr %p (%p)/buf %p\n", win,
			    win->win_off, ioaddr, win->win_mem, bufaddr));
			if (BUF_ISREAD(bp)) {
				memcpy(bufaddr, ioaddr, winsize);
			} else {
				memcpy(ioaddr, bufaddr, winsize);
			}

			/* synchronous write, sync bits back to disk */
			if (BUF_ISWRITE(bp) && !async) {
				rumpuser_memsync(ioaddr, winsize, &error);
			}
			putwindow(rblk, win);
		}

		rump_biodone(bp, bp->b_bcount, 0);
		return;
	}

	/*
	 * Do I/O.  We have different paths for async and sync I/O.
	 * Async I/O is done by passing a request to rumpuser where
	 * it is executed.  The rumpuser routine then calls
	 * biodone() to signal any waiters in the kernel.  I/Os are
	 * executed serially.  Technically executing them in parallel
	 * would produce better results, but then we'd need either
	 * more threads or posix aio.  Maybe worth investigating
	 * this later.
	 *
	 * Using bufq here might be a good idea.
	 */
	if (rump_threads) {
		struct rumpuser_aio *rua;

		rumpuser_mutex_enter(&rumpuser_aio_mtx);
		while ((rumpuser_aio_head+1) % N_AIOS == rumpuser_aio_tail)
			rumpuser_cv_wait(&rumpuser_aio_cv, &rumpuser_aio_mtx);

		rua = &rumpuser_aios[rumpuser_aio_head];
		KASSERT(rua->rua_bp == NULL);
		rua->rua_fd = rblk->rblk_fd;
		rua->rua_data = bp->b_data;
		rua->rua_dlen = bp->b_bcount;
		rua->rua_off = off;
		rua->rua_bp = bp;
		rua->rua_op = BUF_ISREAD(bp);

		/* insert into queue & signal */
		rumpuser_aio_head = (rumpuser_aio_head+1) % N_AIOS;
		rumpuser_cv_signal(&rumpuser_aio_cv);
		rumpuser_mutex_exit(&rumpuser_aio_mtx);

		/* make sure non-async writes end up on backing media */
		if (BUF_ISWRITE(bp) && !async) {
			biowait(bp);
			rumpuser_fsync(rblk->rblk_fd, &error);
		}
	} else {
		if (BUF_ISREAD(bp)) {
			rumpuser_read_bio(rblk->rblk_fd, bp->b_data,
			    bp->b_bcount, off, rump_biodone, bp);
		} else {
			rumpuser_write_bio(rblk->rblk_fd, bp->b_data,
			    bp->b_bcount, off, rump_biodone, bp);
		}
		if (!async) {
			if (BUF_ISWRITE(bp))
				rumpuser_fsync(rblk->rblk_fd, &error);
		}
	}
}

void
rumpblk_strategy(struct buf *bp)
{

	dostrategy(bp);
}

/*
 * Simple random number generator.  This is private so that we can
 * repeatably control which blocks will fail.
 *
 * <mlelstv> pooka, rand()
 * <mlelstv> [paste]
 */
static unsigned
gimmerand(void)
{

	return (randstate = randstate * 1103515245 + 12345) % (0x80000000L);
}

/*
 * Block device with very simple fault injection.  Fails every
 * n out of BLKFAIL_MAX I/Os with EIO.  n is determined by the
 * environment variable RUMP_BLKFAIL.
 */
void
rumpblk_strategy_fail(struct buf *bp)
{

	if (gimmerand() % BLKFAIL_MAX >= blkfail) {
		dostrategy(bp);
	} else {
		printf("block fault injection: failing I/O on block %lld\n",
		    (long long)bp->b_blkno);
		bp->b_error = EIO;
		biodone(bp);
	}
}