/*	$NetBSD: rumpblk.c,v 1.64.2.1 2016/07/18 03:50:00 pgoyette Exp $	*/
2
3 /*
4 * Copyright (c) 2009 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Finnish Cultural Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
19 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 /*
32 * Block device emulation. Presents a block device interface and
33 * uses rumpuser system calls to satisfy I/O requests.
34 *
35 * We provide fault injection. The driver can be made to fail
36 * I/O occasionally.
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: rumpblk.c,v 1.64.2.1 2016/07/18 03:50:00 pgoyette Exp $");
41
42 #include <sys/param.h>
43 #include <sys/buf.h>
44 #include <sys/conf.h>
45 #include <sys/condvar.h>
46 #include <sys/disklabel.h>
47 #include <sys/evcnt.h>
48 #include <sys/fcntl.h>
49 #include <sys/kmem.h>
50 #include <sys/malloc.h>
51 #include <sys/queue.h>
52 #include <sys/stat.h>
53 #include <sys/cprng.h>
54 #include <sys/localcount.h>
55
56 #include <rump-sys/kern.h>
57 #include <rump-sys/vfs.h>
58
59 #include <rump/rumpuser.h>
60
61 #if 0
62 #define DPRINTF(x) printf x
63 #else
64 #define DPRINTF(x)
65 #endif
66
#define RUMPBLK_SIZE 16		/* maximum number of emulated devices */

/* Per-minor state for one emulated block device. */
static struct rblkdev {
	char *rblk_path;	/* host path; NULL marks a free slot */
	int rblk_fd;		/* host file descriptor, -1 when not open */
	int rblk_mode;		/* FREAD or FREAD|FWRITE, set at open */

	uint64_t rblk_size;	/* device size in bytes visible to callers */
	uint64_t rblk_hostoffset; /* start of the device window in host file */
	uint64_t rblk_hostsize;	/* total size of the backing host file */
	int rblk_ftype;		/* host file type (RUMPUSER_FT_*) */

	struct disklabel rblk_label; /* fabricated label, see makedefaultlabel() */
} minors[RUMPBLK_SIZE];
80
/* I/O statistics, published through the evcnt framework */
static struct evcnt ev_io_total;	/* total I/O requests */
static struct evcnt ev_io_async;	/* async I/O requests */

static struct evcnt ev_bwrite_total;	/* bytes written */
static struct evcnt ev_bwrite_async;	/* bytes written asynchronously */
static struct evcnt ev_bread_total;	/* read counter */

/* device switch entry points */
dev_type_open(rumpblk_open);
dev_type_close(rumpblk_close);
dev_type_read(rumpblk_read);
dev_type_write(rumpblk_write);
dev_type_ioctl(rumpblk_ioctl);
dev_type_strategy(rumpblk_strategy);
dev_type_strategy(rumpblk_strategy_fail);
dev_type_dump(rumpblk_dump);
dev_type_size(rumpblk_size);

#ifdef _MODULE
/* devsw reference counting when built as a module */
struct localcount rumpblk_b_localcount, rumpblk_c_localcount;
#endif
101
/* block device switch: normal (non-fault-injecting) strategy */
static const struct bdevsw rumpblk_bdevsw = {
	.d_open = rumpblk_open,
	.d_close = rumpblk_close,
	.d_strategy = rumpblk_strategy,
	.d_ioctl = rumpblk_ioctl,
	.d_dump = nodump,
	.d_psize = nosize,
	.d_discard = nodiscard,
#ifdef _MODULE
	.d_localcount = &rumpblk_b_localcount,
#endif
	.d_flag = D_DISK
};
115
/*
 * Alternate block device switch used when RUMP_BLKFAIL is set:
 * identical except for the fault-injecting strategy routine.
 */
static const struct bdevsw rumpblk_bdevsw_fail = {
	.d_open = rumpblk_open,
	.d_close = rumpblk_close,
	.d_strategy = rumpblk_strategy_fail,
	.d_ioctl = rumpblk_ioctl,
	.d_dump = nodump,
	.d_psize = nosize,
	.d_discard = nodiscard,
#ifdef _MODULE
	.d_localcount = &rumpblk_b_localcount,
#endif
	.d_flag = D_DISK
};
129
/* character device switch: raw read/write funnelled through physio */
static const struct cdevsw rumpblk_cdevsw = {
	.d_open = rumpblk_open,
	.d_close = rumpblk_close,
	.d_read = rumpblk_read,
	.d_write = rumpblk_write,
	.d_ioctl = rumpblk_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
#ifdef _MODULE
	.d_localcount = &rumpblk_c_localcount,
#endif
	.d_flag = D_DISK
};
147
static int backend_open(struct rblkdev *, const char *);
static int backend_close(struct rblkdev *);

/* fail every n out of BLKFAIL_MAX */
#define BLKFAIL_MAX 10000
static int blkfail;		/* failure rate; 0 disables injection */
static unsigned randstate;	/* private PRNG state, see gimmerand() */
static kmutex_t rumpblk_lock;	/* serializes access to minors[] */
static int sectshift = DEV_BSHIFT; /* log2 of the emulated sector size */
157
/*
 * Fabricate a minimal disklabel for an emulated device.  "size" is the
 * device size in bytes (as passed from rumpblk_register()) and "part"
 * is the partition index that should describe the whole device.
 */
static void
makedefaultlabel(struct disklabel *lp, off_t size, int part)
{
	int i;

	memset(lp, 0, sizeof(*lp));

	/*
	 * NOTE(review): d_secperunit is assigned the byte count while
	 * d_nsectors is in sectors (size >> sectshift) -- looks
	 * inconsistent; confirm against disklabel consumers.
	 */
	lp->d_secperunit = size;
	lp->d_secsize = 1 << sectshift;
	lp->d_nsectors = size >> sectshift;
	lp->d_ntracks = 1;
	lp->d_ncylinders = 1;
	lp->d_secpercyl = lp->d_nsectors;

	/* oh dear oh dear */
	strncpy(lp->d_typename, "rumpd", sizeof(lp->d_typename));
	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));

	lp->d_type = DKTYPE_RUMPD;
	lp->d_rpm = 11;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	/* XXX: RAW_PART handling? */
	for (i = 0; i < part; i++) {
		lp->d_partitions[i].p_fstype = FS_UNUSED;
	}
	lp->d_partitions[part].p_size = size >> sectshift;
	lp->d_npartitions = part+1;
	/* XXX: file system type? */

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = 0; /* XXX */
}
193
194 int
195 rumpblk_init(void)
196 {
197 char buf[64];
198 devmajor_t rumpblkmaj = RUMPBLK_DEVMAJOR;
199 unsigned tmp;
200 int i;
201
202 mutex_init(&rumpblk_lock, MUTEX_DEFAULT, IPL_NONE);
203
204 if (rumpuser_getparam("RUMP_BLKFAIL", buf, sizeof(buf)) == 0) {
205 blkfail = strtoul(buf, NULL, 10);
206 /* fail everything */
207 if (blkfail > BLKFAIL_MAX)
208 blkfail = BLKFAIL_MAX;
209 if (rumpuser_getparam("RUMP_BLKFAIL_SEED",
210 buf, sizeof(buf)) == 0) {
211 randstate = strtoul(buf, NULL, 10);
212 } else {
213 randstate = cprng_fast32();
214 }
215 printf("rumpblk: FAULT INJECTION ACTIVE! fail %d/%d. "
216 "seed %u\n", blkfail, BLKFAIL_MAX, randstate);
217 } else {
218 blkfail = 0;
219 }
220
221 if (rumpuser_getparam("RUMP_BLKSECTSHIFT", buf, sizeof(buf)) == 0) {
222 printf("rumpblk: ");
223 tmp = strtoul(buf, NULL, 10);
224 if (tmp >= DEV_BSHIFT)
225 sectshift = tmp;
226 else
227 printf("RUMP_BLKSECTSHIFT must be least %d (now %d), ",
228 DEV_BSHIFT, tmp);
229 printf("using %d for sector shift (size %d)\n",
230 sectshift, 1<<sectshift);
231 }
232
233 memset(minors, 0, sizeof(minors));
234 for (i = 0; i < RUMPBLK_SIZE; i++) {
235 minors[i].rblk_fd = -1;
236 }
237
238 evcnt_attach_dynamic(&ev_io_total, EVCNT_TYPE_MISC, NULL,
239 "rumpblk", "I/O reqs");
240 evcnt_attach_dynamic(&ev_io_async, EVCNT_TYPE_MISC, NULL,
241 "rumpblk", "async I/O");
242
243 evcnt_attach_dynamic(&ev_bread_total, EVCNT_TYPE_MISC, NULL,
244 "rumpblk", "bytes read");
245 evcnt_attach_dynamic(&ev_bwrite_total, EVCNT_TYPE_MISC, NULL,
246 "rumpblk", "bytes written");
247 evcnt_attach_dynamic(&ev_bwrite_async, EVCNT_TYPE_MISC, NULL,
248 "rumpblk", "bytes written async");
249
250 if (blkfail) {
251 return devsw_attach("rumpblk",
252 &rumpblk_bdevsw_fail, &rumpblkmaj,
253 &rumpblk_cdevsw, &rumpblkmaj);
254 } else {
255 return devsw_attach("rumpblk",
256 &rumpblk_bdevsw, &rumpblkmaj,
257 &rumpblk_cdevsw, &rumpblkmaj);
258 }
259 }
260
/*
 * Register a host file or device as a rump block device and return its
 * minor number in *dmin.  Registering an already-registered path simply
 * returns the existing minor.  "offset" and "size" select a window into
 * the host file; size == RUMPBLK_SIZENOTSET means "up to end of file".
 * Returns 0 on success or an errno value.
 */
int
rumpblk_register(const char *path, devminor_t *dmin,
	uint64_t offset, uint64_t size)
{
	struct rblkdev *rblk;
	uint64_t flen;
	size_t len;
	int ftype, error, i;

	/* devices might not report correct size unless they're open */
	if ((error = rumpuser_getfileinfo(path, &flen, &ftype)) != 0)
		return error;

	/* verify host file is of supported type */
	if (!(ftype == RUMPUSER_FT_REG
	    || ftype == RUMPUSER_FT_BLK
	    || ftype == RUMPUSER_FT_CHR))
		return EINVAL;

	/* already registered?  then hand back the existing minor */
	mutex_enter(&rumpblk_lock);
	for (i = 0; i < RUMPBLK_SIZE; i++) {
		if (minors[i].rblk_path&&strcmp(minors[i].rblk_path, path)==0) {
			mutex_exit(&rumpblk_lock);
			*dmin = i;
			return 0;
		}
	}

	/* find a free slot */
	for (i = 0; i < RUMPBLK_SIZE; i++)
		if (minors[i].rblk_path == NULL)
			break;
	if (i == RUMPBLK_SIZE) {
		mutex_exit(&rumpblk_lock);
		return EBUSY;
	}

	/*
	 * Reserve the slot with a placeholder path so the lock can be
	 * dropped across the blocking allocation below; the placeholder
	 * is immediately overwritten with the real copy.
	 */
	rblk = &minors[i];
	rblk->rblk_path = __UNCONST("taken");
	mutex_exit(&rumpblk_lock);

	len = strlen(path);
	rblk->rblk_path = malloc(len + 1, M_TEMP, M_WAITOK);
	strcpy(rblk->rblk_path, path);
	rblk->rblk_hostoffset = offset;
	if (size != RUMPBLK_SIZENOTSET) {
		/* explicit window must fit inside the host file */
		KASSERT(size + offset <= flen);
		rblk->rblk_size = size;
	} else {
		/* use everything from offset to end of host file */
		KASSERT(offset < flen);
		rblk->rblk_size = flen - offset;
	}
	rblk->rblk_hostsize = flen;
	rblk->rblk_ftype = ftype;
	makedefaultlabel(&rblk->rblk_label, rblk->rblk_size, i);

	if ((error = backend_open(rblk, path)) != 0) {
		/* roll back the reservation on failure */
		memset(&rblk->rblk_label, 0, sizeof(rblk->rblk_label));
		free(rblk->rblk_path, M_TEMP);
		rblk->rblk_path = NULL;
		return error;
	}

	*dmin = i;
	return 0;
}
326
/*
 * Unregister a rumpblk device.  It's the caller's responsibility to
 * make sure it's no longer in use.
 */
331 int
332 rumpblk_deregister(const char *path)
333 {
334 struct rblkdev *rblk;
335 int i;
336
337 mutex_enter(&rumpblk_lock);
338 for (i = 0; i < RUMPBLK_SIZE; i++) {
339 if (minors[i].rblk_path&&strcmp(minors[i].rblk_path, path)==0) {
340 break;
341 }
342 }
343 mutex_exit(&rumpblk_lock);
344
345 if (i == RUMPBLK_SIZE)
346 return ENOENT;
347
348 rblk = &minors[i];
349 backend_close(rblk);
350
351 free(rblk->rblk_path, M_TEMP);
352 memset(&rblk->rblk_label, 0, sizeof(rblk->rblk_label));
353 rblk->rblk_path = NULL;
354
355 return 0;
356 }
357
358 /*
359 * Release all backend resources, to be called only when the rump
360 * kernel is being shut down.
361 * This routine does not do a full "fini" since we're going down anyway.
362 */
363 void
364 rumpblk_fini(void)
365 {
366 int i;
367
368 for (i = 0; i < RUMPBLK_SIZE; i++) {
369 struct rblkdev *rblk;
370
371 rblk = &minors[i];
372 if (rblk->rblk_fd != -1)
373 backend_close(rblk);
374 }
375 }
376
377 static int
378 backend_open(struct rblkdev *rblk, const char *path)
379 {
380 int error, fd;
381
382 KASSERT(rblk->rblk_fd == -1);
383 error = rumpuser_open(path,
384 RUMPUSER_OPEN_RDWR | RUMPUSER_OPEN_BIO, &fd);
385 if (error) {
386 error = rumpuser_open(path,
387 RUMPUSER_OPEN_RDONLY | RUMPUSER_OPEN_BIO, &fd);
388 if (error)
389 return error;
390 rblk->rblk_mode = FREAD;
391 } else {
392 rblk->rblk_mode = FREAD|FWRITE;
393 }
394
395 rblk->rblk_fd = fd;
396 KASSERT(rblk->rblk_fd != -1);
397 return 0;
398 }
399
400 static int
401 backend_close(struct rblkdev *rblk)
402 {
403
404 rumpuser_close(rblk->rblk_fd);
405 rblk->rblk_fd = -1;
406
407 return 0;
408 }
409
410 int
411 rumpblk_open(dev_t dev, int flag, int fmt, struct lwp *l)
412 {
413 struct rblkdev *rblk = &minors[minor(dev)];
414
415 if (rblk->rblk_fd == -1)
416 return ENXIO;
417
418 if (((flag & (FREAD|FWRITE)) & ~rblk->rblk_mode) != 0) {
419 return EACCES;
420 }
421
422 return 0;
423 }
424
425 int
426 rumpblk_close(dev_t dev, int flag, int fmt, struct lwp *l)
427 {
428
429 return 0;
430 }
431
/*
 * Handle a small set of disk ioctls; anything else returns ENOTTY.
 * (well, we should support a few more, but we don't for now)
 */
int
rumpblk_ioctl(dev_t dev, u_long xfer, void *addr, int flag, struct lwp *l)
{
	devminor_t dmin = minor(dev);
	struct rblkdev *rblk = &minors[dmin];
	struct partinfo *pi;
	struct partition *dp;
	int error = 0;

	switch (xfer) {
	case DIOCGDINFO:
		/* hand out the fabricated disklabel */
		*(struct disklabel *)addr = rblk->rblk_label;
		break;

	case DIOCGPARTINFO:
		/* partition info is taken straight from the fake label */
		dp = &rblk->rblk_label.d_partitions[DISKPART(dmin)];
		pi = addr;
		pi->pi_offset = dp->p_offset;
		pi->pi_size = dp->p_size;
		pi->pi_secsize = rblk->rblk_label.d_secsize;
		pi->pi_bsize = BLKDEV_IOSIZE;
		pi->pi_fstype = dp->p_fstype;
		pi->pi_fsize = dp->p_fsize;
		pi->pi_frag = dp->p_frag;
		pi->pi_cpg = dp->p_cpg;
		break;

	/* it's synced enough along the write path */
	case DIOCCACHESYNC:
		break;

	case DIOCGMEDIASIZE:
		*(off_t *)addr = (off_t)rblk->rblk_size;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return error;
}
475
476 static int
477 do_physio(dev_t dev, struct uio *uio, int which)
478 {
479 void (*strat)(struct buf *);
480
481 if (blkfail)
482 strat = rumpblk_strategy_fail;
483 else
484 strat = rumpblk_strategy;
485
486 return physio(strat, NULL, dev, which, minphys, uio);
487 }
488
489 int
490 rumpblk_read(dev_t dev, struct uio *uio, int flags)
491 {
492
493 return do_physio(dev, uio, B_READ);
494 }
495
496 int
497 rumpblk_write(dev_t dev, struct uio *uio, int flags)
498 {
499
500 return do_physio(dev, uio, B_WRITE);
501 }
502
503 static void
504 dostrategy(struct buf *bp)
505 {
506 struct rblkdev *rblk = &minors[minor(bp->b_dev)];
507 off_t off;
508 int async = bp->b_flags & B_ASYNC;
509 int op;
510
511 if (bp->b_bcount % (1<<sectshift) != 0) {
512 rump_biodone(bp, 0, EINVAL);
513 return;
514 }
515
516 /* collect statistics */
517 ev_io_total.ev_count++;
518 if (async)
519 ev_io_async.ev_count++;
520 if (BUF_ISWRITE(bp)) {
521 ev_bwrite_total.ev_count += bp->b_bcount;
522 if (async)
523 ev_bwrite_async.ev_count += bp->b_bcount;
524 } else {
525 ev_bread_total.ev_count++;
526 }
527
528 /*
529 * b_blkno is always in terms of DEV_BSIZE, and since we need
530 * to translate to a byte offset for the host read, this
531 * calculation does not need sectshift.
532 */
533 off = bp->b_blkno << DEV_BSHIFT;
534
535 /*
536 * Do bounds checking if we're working on a file. Otherwise
537 * invalid file systems might attempt to read beyond EOF. This
538 * is bad(tm) especially on mmapped images. This is essentially
539 * the kernel bounds_check() routines.
540 */
541 if (off + bp->b_bcount > rblk->rblk_size) {
542 int64_t sz = rblk->rblk_size - off;
543
544 /* EOF */
545 if (sz == 0) {
546 rump_biodone(bp, 0, 0);
547 return;
548 }
549 /* beyond EOF ==> error */
550 if (sz < 0) {
551 rump_biodone(bp, 0, EINVAL);
552 return;
553 }
554
555 /* truncate to device size */
556 bp->b_bcount = sz;
557 }
558
559 off += rblk->rblk_hostoffset;
560 DPRINTF(("rumpblk_strategy: 0x%x bytes %s off 0x%" PRIx64
561 " (0x%" PRIx64 " - 0x%" PRIx64 "), %ssync\n",
562 bp->b_bcount, BUF_ISREAD(bp) ? "READ" : "WRITE",
563 off, off, (off + bp->b_bcount), async ? "a" : ""));
564
565 op = BUF_ISREAD(bp) ? RUMPUSER_BIO_READ : RUMPUSER_BIO_WRITE;
566 if (BUF_ISWRITE(bp) && !async)
567 op |= RUMPUSER_BIO_SYNC;
568
569 rumpuser_bio(rblk->rblk_fd, op, bp->b_data, bp->b_bcount, off,
570 rump_biodone, bp);
571 }
572
/* normal strategy entry point: no fault injection */
void
rumpblk_strategy(struct buf *bp)
{

	dostrategy(bp);
}
579
580 /*
581 * Simple random number generator. This is private so that we can
582 * very repeatedly control which blocks will fail.
583 *
584 * <mlelstv> pooka, rand()
585 * <mlelstv> [paste]
586 */
587 static unsigned
588 gimmerand(void)
589 {
590
591 return (randstate = randstate * 1103515245 + 12345) % (0x80000000L);
592 }
593
594 /*
595 * Block device with very simple fault injection. Fails every
596 * n out of BLKFAIL_MAX I/O with EIO. n is determined by the env
597 * variable RUMP_BLKFAIL.
598 */
599 void
600 rumpblk_strategy_fail(struct buf *bp)
601 {
602
603 if (gimmerand() % BLKFAIL_MAX >= blkfail) {
604 dostrategy(bp);
605 } else {
606 printf("block fault injection: failing I/O on block %lld\n",
607 (long long)bp->b_blkno);
608 bp->b_error = EIO;
609 biodone(bp);
610 }
611 }
612