rumpblk.c revision 1.23.2.6
/*	$NetBSD: rumpblk.c,v 1.23.2.6 2010/10/09 03:32:44 yamt Exp $	*/

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Block device emulation.  Presents a block device interface and
 * uses rumpuser system calls to satisfy I/O requests.
 *
 * We provide fault injection.  The driver can be made to fail
 * I/O occasionally.
 *
 * The driver also provides an optimization for regular files by
 * using memory-mapped I/O.  This avoids kernel access for every
 * I/O operation.  It also gives finer-grained control of how to
 * flush data.  Additionally, in case the rump kernel dumps core,
 * we get way less carnage.
 *
 * However, it is quite costly when writing large amounts of
 * file data, since the old contents cannot simply be overwritten;
 * they must be paged in first before being replaced (i.e.
 * read/modify/write).  Ideally, we would use directio.  The problem
 * is that directio can fail silently, causing improper file system
 * semantics (i.e. unflushed data).  Therefore, default to mmap for
 * now.  Even so, directio _should_ be safe and can be enabled by
 * compiling this module with -DHAS_ODIRECT.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rumpblk.c,v 1.23.2.6 2010/10/09 03:32:44 yamt Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/disklabel.h>
#include <sys/evcnt.h>
#include <sys/fcntl.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/stat.h>

#include <rump/rumpuser.h>

#include "rump_private.h"
#include "rump_vfs_private.h"

#if 0
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif

/* Default: 16 x 1MB windows */
unsigned memwinsize = (1<<20);
unsigned memwincnt = 16;

#define STARTWIN(off)		((off) & ~((off_t)memwinsize-1))
#define INWIN(win,off)		((win)->win_off == STARTWIN(off))
#define WINSIZE(rblk, win)	(MIN((rblk->rblk_hostsize-win->win_off), \
				      memwinsize))
#define WINVALID(win)		((win)->win_off != (off_t)-1)
#define WINVALIDATE(win)	((win)->win_off = (off_t)-1)
struct blkwin {
	off_t win_off;
	void *win_mem;
	int win_refcnt;

	TAILQ_ENTRY(blkwin) win_lru;
};

#define RUMPBLK_SIZE 16
static struct rblkdev {
	char *rblk_path;
	int rblk_fd;
	int rblk_opencnt;
#ifdef HAS_ODIRECT
	int rblk_dfd;
#endif
	uint64_t rblk_size;
	uint64_t rblk_hostoffset;
	uint64_t rblk_hostsize;
	int rblk_ftype;

	/* for mmap */
	int rblk_mmflags;
	kmutex_t rblk_memmtx;
	kcondvar_t rblk_memcv;
	TAILQ_HEAD(winlru, blkwin) rblk_lruq;
	bool rblk_waiting;

	struct disklabel rblk_label;
} minors[RUMPBLK_SIZE];

static struct evcnt ev_io_total;
static struct evcnt ev_io_async;

static struct evcnt ev_memblk_hits;
static struct evcnt ev_memblk_busy;

static struct evcnt ev_bwrite_total;
static struct evcnt ev_bwrite_async;
static struct evcnt ev_bread_total;

dev_type_open(rumpblk_open);
dev_type_close(rumpblk_close);
dev_type_read(rumpblk_read);
dev_type_write(rumpblk_write);
dev_type_ioctl(rumpblk_ioctl);
dev_type_strategy(rumpblk_strategy);
dev_type_strategy(rumpblk_strategy_fail);
dev_type_dump(rumpblk_dump);
dev_type_size(rumpblk_size);

static const struct bdevsw rumpblk_bdevsw = {
	rumpblk_open, rumpblk_close, rumpblk_strategy, rumpblk_ioctl,
	nodump, nosize, D_DISK
};

static const struct bdevsw rumpblk_bdevsw_fail = {
	rumpblk_open, rumpblk_close, rumpblk_strategy_fail, rumpblk_ioctl,
	nodump, nosize, D_DISK
};

static const struct cdevsw rumpblk_cdevsw = {
	rumpblk_open, rumpblk_close, rumpblk_read, rumpblk_write,
	rumpblk_ioctl, nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* fail every n out of BLKFAIL_MAX */
#define BLKFAIL_MAX 10000
static int blkfail;
static unsigned randstate;
static kmutex_t rumpblk_lock;
static int sectshift = DEV_BSHIFT;

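/*
 * Construct a default in-memory disklabel for the backing file:
 * one track, one cylinder, and a single partition of the given size
 * at partition index "part".
 */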
static void
makedefaultlabel(struct disklabel *lp, off_t size, int part)
{
	int i;

	memset(lp, 0, sizeof(*lp));

	lp->d_secperunit = size;
	lp->d_secsize = 1 << sectshift;
	lp->d_nsectors = size >> sectshift;
	lp->d_ntracks = 1;
	lp->d_ncylinders = 1;
	lp->d_secpercyl = lp->d_nsectors;

	/* oh dear oh dear */
	strncpy(lp->d_typename, "rumpd", sizeof(lp->d_typename));
	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));

	lp->d_type = DTYPE_RUMPD;
	lp->d_rpm = 11;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	/* XXX: RAW_PART handling? */
	for (i = 0; i < part; i++) {
		lp->d_partitions[i].p_fstype = FS_UNUSED;
	}
	lp->d_partitions[part].p_size = size >> sectshift;
	lp->d_npartitions = part+1;
	/* XXX: file system type? */

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = 0; /* XXX */
}

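/*
 * Look up (or create) the memory-mapped window covering "off".
 * Windows live on an LRU queue; if no valid window covers the offset,
 * the least recently used unreferenced window is recycled, otherwise
 * we wait for one to be released.  On success the window is returned
 * with its reference count bumped and *wsize clamped to the number of
 * bytes available in the window starting at "off".
 */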
static struct blkwin *
getwindow(struct rblkdev *rblk, off_t off, int *wsize, int *error)
{
	struct blkwin *win;

	mutex_enter(&rblk->rblk_memmtx);
 retry:
	/* search for window */
	TAILQ_FOREACH(win, &rblk->rblk_lruq, win_lru) {
		if (INWIN(win, off) && WINVALID(win))
			break;
	}

	/* found?  return */
	if (win) {
		ev_memblk_hits.ev_count++;
		TAILQ_REMOVE(&rblk->rblk_lruq, win, win_lru);
		goto good;
	}

	/*
	 * Else, create new window.  If the least recently used is not
	 * currently in use, reuse that.  Otherwise we need to wait.
	 */
	win = TAILQ_LAST(&rblk->rblk_lruq, winlru);
	if (win->win_refcnt == 0) {
		TAILQ_REMOVE(&rblk->rblk_lruq, win, win_lru);
		mutex_exit(&rblk->rblk_memmtx);

		if (WINVALID(win)) {
			DPRINTF(("win %p, unmap mem %p, off 0x%" PRIx64 "\n",
			    win, win->win_mem, win->win_off));
			rumpuser_unmap(win->win_mem, WINSIZE(rblk, win));
			WINVALIDATE(win);
		}

		win->win_off = STARTWIN(off);
		win->win_mem = rumpuser_filemmap(rblk->rblk_fd, win->win_off,
		    WINSIZE(rblk, win), rblk->rblk_mmflags, error);
		DPRINTF(("win %p, off 0x%" PRIx64 ", mem %p\n",
		    win, win->win_off, win->win_mem));

		mutex_enter(&rblk->rblk_memmtx);
		if (win->win_mem == NULL) {
			WINVALIDATE(win);
			TAILQ_INSERT_TAIL(&rblk->rblk_lruq, win, win_lru);
			mutex_exit(&rblk->rblk_memmtx);
			return NULL;
		}
	} else {
		DPRINTF(("memwin wait\n"));
		ev_memblk_busy.ev_count++;

		rblk->rblk_waiting = true;
		cv_wait(&rblk->rblk_memcv, &rblk->rblk_memmtx);
		goto retry;
	}

 good:
	KASSERT(win);
	win->win_refcnt++;
	TAILQ_INSERT_HEAD(&rblk->rblk_lruq, win, win_lru);
	mutex_exit(&rblk->rblk_memmtx);
	*wsize = MIN(*wsize, memwinsize - (off-win->win_off));
	KASSERT(*wsize);

	return win;
}

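/*
 * Release a window reference and, if the window becomes free, wake up
 * anyone waiting for one.
 */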
static void
putwindow(struct rblkdev *rblk, struct blkwin *win)
{

	mutex_enter(&rblk->rblk_memmtx);
	if (--win->win_refcnt == 0 && rblk->rblk_waiting) {
		rblk->rblk_waiting = false;
		cv_broadcast(&rblk->rblk_memcv);
	}
	KASSERT(win->win_refcnt >= 0);
	mutex_exit(&rblk->rblk_memmtx);
}

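/*
 * Unmap and free all windows of a device and disable further mmap I/O.
 */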
static void
wincleanup(struct rblkdev *rblk)
{
	struct blkwin *win;

	while ((win = TAILQ_FIRST(&rblk->rblk_lruq)) != NULL) {
		TAILQ_REMOVE(&rblk->rblk_lruq, win, win_lru);
		if (WINVALID(win)) {
			DPRINTF(("cleanup win %p addr %p\n",
			    win, win->win_mem));
			rumpuser_unmap(win->win_mem, WINSIZE(rblk, win));
		}
		kmem_free(win, sizeof(*win));
	}
	rblk->rblk_mmflags = 0;
}

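/*
 * Initialize the driver and attach the device switches.  The following
 * host environment variables are honored as tunables:
 *
 *	RUMP_BLKFAIL		fail n out of every BLKFAIL_MAX I/Os
 *	RUMP_BLKFAIL_SEED	seed for the fault injection PRNG
 *	RUMP_BLKWINSIZE		mmap window size (must be a power of two)
 *	RUMP_BLKWINCOUNT	number of mmap windows per device
 *	RUMP_BLKSECTSHIFT	sector shift (at least DEV_BSHIFT)
 */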
int
rumpblk_init(void)
{
	char buf[64];
	devmajor_t rumpblkmaj = RUMPBLK_DEVMAJOR;
	unsigned tmp;
	int error, i;

	mutex_init(&rumpblk_lock, MUTEX_DEFAULT, IPL_NONE);

	if (rumpuser_getenv("RUMP_BLKFAIL", buf, sizeof(buf), &error) == 0) {
		blkfail = strtoul(buf, NULL, 10);
		/* fail everything */
		if (blkfail > BLKFAIL_MAX)
			blkfail = BLKFAIL_MAX;
		if (rumpuser_getenv("RUMP_BLKFAIL_SEED", buf, sizeof(buf),
		    &error) == 0) {
			randstate = strtoul(buf, NULL, 10);
		} else {
			randstate = arc4random();
		}
		printf("rumpblk: FAULT INJECTION ACTIVE! fail %d/%d. "
		    "seed %u\n", blkfail, BLKFAIL_MAX, randstate);
	} else {
		blkfail = 0;
	}

	if (rumpuser_getenv("RUMP_BLKWINSIZE", buf, sizeof(buf), &error) == 0) {
		printf("rumpblk: ");
		tmp = strtoul(buf, NULL, 10);
		if (tmp && !(tmp & (tmp-1)))
			memwinsize = tmp;
		else
			printf("invalid RUMP_BLKWINSIZE %u, ", tmp);
		printf("using %u for memwinsize\n", memwinsize);
	}
	if (rumpuser_getenv("RUMP_BLKWINCOUNT", buf, sizeof(buf), &error) == 0){
		printf("rumpblk: ");
		tmp = strtoul(buf, NULL, 10);
		if (tmp)
			memwincnt = tmp;
		else
			printf("invalid RUMP_BLKWINCOUNT %u, ", tmp);
		printf("using %u for memwincount\n", memwincnt);
	}
	if (rumpuser_getenv("RUMP_BLKSECTSHIFT", buf, sizeof(buf), &error)==0){
		printf("rumpblk: ");
		tmp = strtoul(buf, NULL, 10);
		if (tmp >= DEV_BSHIFT)
			sectshift = tmp;
		else
			printf("RUMP_BLKSECTSHIFT must be at least %d (now %u), ",
			   DEV_BSHIFT, tmp);
		printf("using %d for sector shift (size %d)\n",
		    sectshift, 1<<sectshift);
	}

	memset(minors, 0, sizeof(minors));
	for (i = 0; i < RUMPBLK_SIZE; i++) {
		mutex_init(&minors[i].rblk_memmtx, MUTEX_DEFAULT, IPL_NONE);
		cv_init(&minors[i].rblk_memcv, "rblkmcv");
	}

	evcnt_attach_dynamic(&ev_io_total, EVCNT_TYPE_MISC, NULL,
	    "rumpblk", "I/O reqs");
	evcnt_attach_dynamic(&ev_io_async, EVCNT_TYPE_MISC, NULL,
	    "rumpblk", "async I/O");

	evcnt_attach_dynamic(&ev_bread_total, EVCNT_TYPE_MISC, NULL,
	    "rumpblk", "bytes read");
	evcnt_attach_dynamic(&ev_bwrite_total, EVCNT_TYPE_MISC, NULL,
	    "rumpblk", "bytes written");
	evcnt_attach_dynamic(&ev_bwrite_async, EVCNT_TYPE_MISC, NULL,
	    "rumpblk", "bytes written async");

	evcnt_attach_dynamic(&ev_memblk_hits, EVCNT_TYPE_MISC, NULL,
	    "rumpblk", "window hits");
	evcnt_attach_dynamic(&ev_memblk_busy, EVCNT_TYPE_MISC, NULL,
	    "rumpblk", "all windows busy");

	if (blkfail) {
		return devsw_attach("rumpblk",
		    &rumpblk_bdevsw_fail, &rumpblkmaj,
		    &rumpblk_cdevsw, &rumpblkmaj);
	} else {
		return devsw_attach("rumpblk",
		    &rumpblk_bdevsw, &rumpblkmaj,
		    &rumpblk_cdevsw, &rumpblkmaj);
	}
}

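/*
 * Register a host file or device as a rump block device.  Registering
 * the same path again returns the existing minor number.  "offset" and
 * "size" select the region of the host file to expose; size may be
 * RUMPBLK_SIZENOTSET to use everything from offset to end of file.
 */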
int
rumpblk_register(const char *path, devminor_t *dmin,
	uint64_t offset, uint64_t size)
{
	struct rblkdev *rblk;
	uint64_t flen;
	size_t len;
	int ftype, error, i;

	/* devices might not report correct size unless they're open */
	if (rumpuser_getfileinfo(path, &flen, &ftype, &error) == -1)
		return error;

	/* verify host file is of supported type */
	if (!(ftype == RUMPUSER_FT_REG
	   || ftype == RUMPUSER_FT_BLK
	   || ftype == RUMPUSER_FT_CHR))
		return EINVAL;

	mutex_enter(&rumpblk_lock);
	for (i = 0; i < RUMPBLK_SIZE; i++) {
		if (minors[i].rblk_path&&strcmp(minors[i].rblk_path, path)==0) {
			mutex_exit(&rumpblk_lock);
			*dmin = i;
			return 0;
		}
	}

	for (i = 0; i < RUMPBLK_SIZE; i++)
		if (minors[i].rblk_path == NULL)
			break;
	if (i == RUMPBLK_SIZE) {
		mutex_exit(&rumpblk_lock);
		return EBUSY;
	}

	rblk = &minors[i];
	len = strlen(path);
	rblk->rblk_path = malloc(len + 1, M_TEMP, M_WAITOK);
	strcpy(rblk->rblk_path, path);
	rblk->rblk_fd = -1;
	rblk->rblk_hostoffset = offset;
	if (size != RUMPBLK_SIZENOTSET) {
		KASSERT(size + offset <= flen);
		rblk->rblk_size = size;
	} else {
		KASSERT(offset < flen);
		rblk->rblk_size = flen - offset;
	}
	rblk->rblk_hostsize = flen;
	rblk->rblk_ftype = ftype;
	makedefaultlabel(&rblk->rblk_label, rblk->rblk_size, i);
	mutex_exit(&rumpblk_lock);

	*dmin = i;
	return 0;
}

/*
 * Unregister rumpblk.  It's the caller's responsibility to make
 * sure it's no longer in use.
 */
int
rumpblk_deregister(const char *path)
{
	struct rblkdev *rblk;
	int i;

	mutex_enter(&rumpblk_lock);
	for (i = 0; i < RUMPBLK_SIZE; i++) {
		if (minors[i].rblk_path&&strcmp(minors[i].rblk_path, path)==0) {
			break;
		}
	}
	mutex_exit(&rumpblk_lock);

	if (i == RUMPBLK_SIZE)
		return ENOENT;

	rblk = &minors[i];
	KASSERT(rblk->rblk_fd == -1);
	KASSERT(rblk->rblk_opencnt == 0);

	wincleanup(rblk);
	free(rblk->rblk_path, M_TEMP);
	rblk->rblk_path = NULL;
	memset(&rblk->rblk_label, 0, sizeof(rblk->rblk_label));

	return 0;
}

int
rumpblk_open(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct rblkdev *rblk = &minors[minor(dev)];
	int error, fd;

	if (rblk->rblk_path == NULL)
		return ENXIO;

	if (rblk->rblk_fd != -1)
		return 0; /* XXX: refcount, open mode */
	fd = rumpuser_open(rblk->rblk_path, OFLAGS(flag), &error);
	if (error)
		return error;

#ifdef HAS_ODIRECT
	rblk->rblk_dfd = rumpuser_open(rblk->rblk_path,
	    OFLAGS(flag) | O_DIRECT, &error);
	if (error)
		return error;
#endif

	if (rblk->rblk_ftype == RUMPUSER_FT_REG) {
		uint64_t fsize = rblk->rblk_size, off = rblk->rblk_hostoffset;
		struct blkwin *win;
		int i, winsize;

		/*
		 * Use mmap to access a regular file.  Allocate and
		 * cache initial windows here.  Failure to allocate one
		 * means fallback to read/write i/o.
		 */

		rblk->rblk_mmflags = 0;
		if (flag & FREAD)
			rblk->rblk_mmflags |= RUMPUSER_FILEMMAP_READ;
		if (flag & FWRITE) {
			rblk->rblk_mmflags |= RUMPUSER_FILEMMAP_WRITE;
			rblk->rblk_mmflags |= RUMPUSER_FILEMMAP_SHARED;
		}

		TAILQ_INIT(&rblk->rblk_lruq);
		rblk->rblk_fd = fd;

		for (i = 0; i < memwincnt && off + i*memwinsize < fsize; i++) {
			win = kmem_zalloc(sizeof(*win), KM_SLEEP);
			WINVALIDATE(win);
			TAILQ_INSERT_TAIL(&rblk->rblk_lruq, win, win_lru);

			/*
			 * Allocate first windows.  Here we just generally
			 * make sure a) we can mmap at all b) we have the
			 * necessary VA available
			 */
			winsize = memwinsize;
			win = getwindow(rblk, off + i*memwinsize, &winsize,
			    &error);
			if (win) {
				putwindow(rblk, win);
			} else {
				wincleanup(rblk);
				break;
			}
		}
	} else {
		rblk->rblk_fd = fd;
	}

	KASSERT(rblk->rblk_fd != -1);
	return 0;
}

int
rumpblk_close(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct rblkdev *rblk = &minors[minor(dev)];
	int dummy;

	if (rblk->rblk_mmflags)
		wincleanup(rblk);
	rumpuser_fsync(rblk->rblk_fd, &dummy);
	rumpuser_close(rblk->rblk_fd, &dummy);
	rblk->rblk_fd = -1;

	return 0;
}

int
rumpblk_ioctl(dev_t dev, u_long xfer, void *addr, int flag, struct lwp *l)
{
	devminor_t dmin = minor(dev);
	struct rblkdev *rblk = &minors[dmin];
	struct partinfo *pi;
	int error = 0;

	/* well, we should support a few more, but we don't for now */
	switch (xfer) {
	case DIOCGDINFO:
		*(struct disklabel *)addr = rblk->rblk_label;
		break;

	case DIOCGPART:
		pi = addr;
		pi->part = &rblk->rblk_label.d_partitions[DISKPART(dmin)];
		pi->disklab = &rblk->rblk_label;
		break;

	/* it's synced enough along the write path */
	case DIOCCACHESYNC:
		break;

	default:
		error = ENOTTY;
		break;
	}

	return error;
}

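/*
 * Perform raw character device I/O through physio(), using the
 * fault-injecting strategy routine if fault injection is enabled.
 */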
static int
do_physio(dev_t dev, struct uio *uio, int which)
{
	void (*strat)(struct buf *);

	if (blkfail)
		strat = rumpblk_strategy_fail;
	else
		strat = rumpblk_strategy;

	return physio(strat, NULL, dev, which, minphys, uio);
}

int
rumpblk_read(dev_t dev, struct uio *uio, int flags)
{

	return do_physio(dev, uio, B_READ);
}

int
rumpblk_write(dev_t dev, struct uio *uio, int flags)
{

	return do_physio(dev, uio, B_WRITE);
}

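/*
 * The common strategy routine.  After bounds checking, the request is
 * satisfied in one of three ways: by copying through an mmap window
 * for regular files, by queueing it to the rumpuser async I/O thread,
 * or by a synchronous rumpuser read/write when rump threads are
 * disabled.
 */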
static void
dostrategy(struct buf *bp)
{
	struct rblkdev *rblk = &minors[minor(bp->b_dev)];
	off_t off;
	int async = bp->b_flags & B_ASYNC;
	int error;

	/* collect statistics */
	ev_io_total.ev_count++;
	if (async)
		ev_io_async.ev_count++;
	if (BUF_ISWRITE(bp)) {
		ev_bwrite_total.ev_count += bp->b_bcount;
		if (async)
			ev_bwrite_async.ev_count += bp->b_bcount;
	} else {
		ev_bread_total.ev_count++;
	}

	off = bp->b_blkno << sectshift;
	/*
	 * Do bounds checking if we're working on a file.  Otherwise
	 * invalid file systems might attempt to read beyond EOF.  This
	 * is bad(tm) especially on mmapped images.  This is essentially
	 * what the kernel bounds_check() routines do.
	 */
	if (off + bp->b_bcount > rblk->rblk_size) {
		int64_t sz = rblk->rblk_size - off;

		/* EOF */
		if (sz == 0) {
			rump_biodone(bp, 0, 0);
			return;
		}
		/* beyond EOF ==> error */
		if (sz < 0) {
			rump_biodone(bp, 0, EINVAL);
			return;
		}

		/* truncate to device size */
		bp->b_bcount = sz;
	}

	off += rblk->rblk_hostoffset;
	DPRINTF(("rumpblk_strategy: 0x%x bytes %s off 0x%" PRIx64
	    " (0x%" PRIx64 " - 0x%" PRIx64 "), %ssync\n",
	    bp->b_bcount, BUF_ISREAD(bp) ? "READ" : "WRITE",
	    off, off, (off + bp->b_bcount), async ? "a" : ""));

	/* mmap?  handle here and return */
	if (rblk->rblk_mmflags) {
		struct blkwin *win;
		int winsize, iodone;
		uint8_t *ioaddr, *bufaddr;

		for (iodone = 0; iodone < bp->b_bcount;
		    iodone += winsize, off += winsize) {
			winsize = bp->b_bcount - iodone;
			win = getwindow(rblk, off, &winsize, &error);
			if (win == NULL) {
				rump_biodone(bp, iodone, error);
				return;
			}

			ioaddr = (uint8_t *)win->win_mem + (off-STARTWIN(off));
			bufaddr = (uint8_t *)bp->b_data + iodone;

			DPRINTF(("strat: %p off 0x%" PRIx64
			    ", ioaddr %p (%p)/buf %p\n", win,
			    win->win_off, ioaddr, win->win_mem, bufaddr));
			if (BUF_ISREAD(bp)) {
				memcpy(bufaddr, ioaddr, winsize);
			} else {
				memcpy(ioaddr, bufaddr, winsize);
			}

			/* synchronous write, sync bits back to disk */
			if (BUF_ISWRITE(bp) && !async) {
				rumpuser_memsync(ioaddr, winsize, &error);
			}
			putwindow(rblk, win);
		}

		rump_biodone(bp, bp->b_bcount, 0);
		return;
	}

	/*
	 * Do I/O.  We have different paths for async and sync I/O.
	 * Async I/O is done by passing a request to rumpuser where
	 * it is executed.  The rumpuser routine then calls
	 * biodone() to signal any waiters in the kernel.  I/Os are
	 * executed serially.  Technically executing them in parallel
	 * would produce better results, but then we'd need either
	 * more threads or posix aio.  Maybe worth investigating
	 * this later.
	 *
	 * Using bufq here might be a good idea.
	 */

	if (rump_threads) {
		struct rumpuser_aio *rua;
		int op, fd;

		fd = rblk->rblk_fd;
		if (BUF_ISREAD(bp)) {
			op = RUA_OP_READ;
		} else {
			op = RUA_OP_WRITE;
			if (!async) {
				/* O_DIRECT not fully automatic yet */
#ifdef HAS_ODIRECT
				if ((off & ((1<<sectshift)-1)) == 0
				    && ((intptr_t)bp->b_data
				      & ((1<<sectshift)-1)) == 0
				    && (bp->b_bcount & ((1<<sectshift)-1)) == 0)
					fd = rblk->rblk_dfd;
				else
#endif
					op |= RUA_OP_SYNC;
			}
		}

		rumpuser_mutex_enter(&rumpuser_aio_mtx);
		while ((rumpuser_aio_head+1) % N_AIOS == rumpuser_aio_tail) {
			rumpuser_cv_wait(&rumpuser_aio_cv, &rumpuser_aio_mtx);
		}

		rua = &rumpuser_aios[rumpuser_aio_head];
		KASSERT(rua->rua_bp == NULL);
		rua->rua_fd = fd;
		rua->rua_data = bp->b_data;
		rua->rua_dlen = bp->b_bcount;
		rua->rua_off = off;
		rua->rua_bp = bp;
		rua->rua_op = op;

		/* insert into queue & signal */
		rumpuser_aio_head = (rumpuser_aio_head+1) % N_AIOS;
		rumpuser_cv_signal(&rumpuser_aio_cv);
		rumpuser_mutex_exit(&rumpuser_aio_mtx);
	} else {
		if (BUF_ISREAD(bp)) {
			rumpuser_read_bio(rblk->rblk_fd, bp->b_data,
			    bp->b_bcount, off, rump_biodone, bp);
		} else {
			rumpuser_write_bio(rblk->rblk_fd, bp->b_data,
			    bp->b_bcount, off, rump_biodone, bp);
		}
		if (BUF_ISWRITE(bp) && !async)
			rumpuser_fsync(rblk->rblk_fd, &error);
	}
}

void
rumpblk_strategy(struct buf *bp)
{

	dostrategy(bp);
}

/*
 * Simple random number generator.  This is private so that we can
 * repeatably control which blocks will fail.
 *
 * <mlelstv> pooka, rand()
 * <mlelstv> [paste]
 */
static unsigned
gimmerand(void)
{

	return (randstate = randstate * 1103515245 + 12345) % (0x80000000L);
}

/*
 * Block device with very simple fault injection.  Fails every
 * n out of BLKFAIL_MAX I/Os with EIO.  n is determined by the env
 * variable RUMP_BLKFAIL.
 */
void
rumpblk_strategy_fail(struct buf *bp)
{

	if (gimmerand() % BLKFAIL_MAX >= blkfail) {
		dostrategy(bp);
	} else {
		printf("block fault injection: failing I/O on block %lld\n",
		    (long long)bp->b_blkno);
		bp->b_error = EIO;
		biodone(bp);
	}
}